/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"
#include "exec-all.h"

#include "cpus.h"
#include "compatfd.h"

#ifdef SIGRTMIN
#define SIG_IPI (SIGRTMIN+4)
#else
#define SIG_IPI SIGUSR1
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

int cpu_is_stopped(CPUState *env)
{
    return !vm_running || env->stopped;
}

static void do_vm_stop(int reason)
{
    if (vm_running) {
        cpu_disable_ticks();
        vm_running = 0;
        pause_all_vcpus();
        vm_state_notify(0, reason);
        qemu_aio_flush();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

static int cpu_can_run(CPUState *env)
{
    if (env->stop) {
        return 0;
    }
    if (env->stopped || !vm_running) {
        return 0;
    }
    return 1;
}

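/* A vcpu thread is idle when it has no stop request or queued work pending
 * and is either stopped (or the VM is not running) or halted with no work
 * for the guest CPU. */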
static bool cpu_thread_is_idle(CPUState *env)
{
    if (env->stop || env->queued_work_first) {
        return false;
    }
    if (env->stopped || !vm_running) {
        return true;
    }
    if (!env->halted || qemu_cpu_has_work(env)) {
        return false;
    }
    return true;
}

static bool all_cpu_threads_idle(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(env)) {
            return false;
        }
    }
    return true;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }

    gdb_set_stop_cpu(env);
    qemu_system_debug_request();
#ifdef CONFIG_IOTHREAD
    env->stopped = 1;
#endif
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
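/* Notification file descriptor for the main loop: qemu_event_increment()
 * writes to it and qemu_event_read() drains it again. */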
static int io_thread_fd = -1;

static void qemu_event_increment(void)
{
    /* Write 8 bytes to be compatible with eventfd.  */
    static const uint64_t val = 1;
    ssize_t ret;

    if (io_thread_fd == -1) {
        return;
    }
    do {
        ret = write(io_thread_fd, &val, sizeof(val));
    } while (ret < 0 && errno == EINTR);

    /* EAGAIN is fine, a read must be pending.  */
    if (ret < 0 && errno != EAGAIN) {
        fprintf(stderr, "qemu_event_increment: write() failed: %s\n",
                strerror(errno));
        exit(1);
    }
}

static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    ssize_t len;
    char buffer[512];

    /* Drain the notify pipe.  For eventfd, only 8 bytes will be read.  */
    do {
        len = read(fd, buffer, sizeof(buffer));
    } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
}

static int qemu_event_init(void)
{
    int err;
    int fds[2];

    err = qemu_eventfd(fds);
    if (err == -1) {
        return -errno;
    }
    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }
    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }
    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(unsigned long)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}

static void dummy_signal(int sig)
{
}

/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (unsigned long) opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN) {
            break;
        }

        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        } else if (action.sa_handler) {
            action.sa_handler(info.ssi_signo);
        }
    }
}

static int qemu_signalfd_init(sigset_t mask)
{
    int sigfd;

    sigfd = qemu_signalfd(&mask);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl_setfl(sigfd, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(unsigned long) sigfd);

    return 0;
}

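/* Consume pending SIG_IPI and SIGBUS without blocking; SIGBUS is forwarded
 * to KVM via kvm_on_sigbus_vcpu(), SIG_IPI is simply discarded. */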
static void qemu_kvm_eat_signals(CPUState *env)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));

#ifndef CONFIG_IOTHREAD
    if (sigismember(&chkset, SIGIO) || sigismember(&chkset, SIGALRM)) {
        qemu_notify_event();
    }
#endif
}

#else /* _WIN32 */

HANDLE qemu_event_handle;

static void dummy_event_handler(void *opaque)
{
}

static int qemu_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}

static void qemu_event_increment(void)
{
    if (!SetEvent(qemu_event_handle)) {
        fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
                GetLastError());
        exit(1);
    }
}

static void qemu_kvm_eat_signals(CPUState *env)
{
}
#endif /* _WIN32 */

#ifndef CONFIG_IOTHREAD
static void qemu_kvm_init_cpu_signals(CPUState *env)
{
#ifndef _WIN32
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    sigdelset(&set, SIGIO);
    sigdelset(&set, SIGALRM);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
#endif
}

#ifndef _WIN32
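/* Signals in the returned set are kept blocked so they are only taken
 * synchronously (via signalfd or sigtimedwait), never in an asynchronous
 * handler. */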
static sigset_t block_synchronous_signals(void)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGBUS);
    if (kvm_enabled()) {
        /*
         * We need to process timer signals synchronously to avoid a race
         * between exit_request check and KVM vcpu entry.
         */
        sigaddset(&set, SIGIO);
        sigaddset(&set, SIGALRM);
    }

    return set;
}
#endif

int qemu_init_main_loop(void)
{
#ifndef _WIN32
    sigset_t blocked_signals;
    int ret;

    blocked_signals = block_synchronous_signals();

    ret = qemu_signalfd_init(blocked_signals);
    if (ret) {
        return ret;
    }
#endif

    qemu_init_sigbus();

    return qemu_event_init();
}

void qemu_main_loop_start(void)
{
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;
    int r;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;

    if (kvm_enabled()) {
        r = kvm_init_vcpu(env);
        if (r < 0) {
            fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
            exit(1);
        }
        qemu_kvm_init_cpu_signals(env);
    }
}

int qemu_cpu_is_self(void *env)
{
    return 1;
}

void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    func(data);
}

void resume_all_vcpus(void)
{
}

void pause_all_vcpus(void)
{
}

void qemu_cpu_kick(void *env)
{
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);

    raise(SIG_IPI);
#else
    abort();
#endif
}

void qemu_notify_event(void)
{
    CPUState *env = cpu_single_env;

    qemu_event_increment();
    if (env) {
        cpu_exit(env);
    }
    if (next_cpu && env != next_cpu) {
        cpu_exit(next_cpu);
    }
    exit_request = 1;
}

void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}

void cpu_stop_current(void)
{
}

void vm_stop(int reason)
{
    do_vm_stop(reason);
}

#else /* CONFIG_IOTHREAD */

#include "qemu-thread.h"

QemuMutex qemu_global_mutex;
static QemuMutex qemu_fair_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

static int qemu_system_ready;
/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_system_cond;
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

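/* SIG_IPI handler for the TCG thread: kick the current cpu out of the
 * translator loop and request an exit from cpu_exec_all(). */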
static void cpu_signal(int sig)
{
    if (cpu_single_env) {
        cpu_exit(cpu_single_env);
    }
    exit_request = 1;
}

static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

static sigset_t block_io_signals(void)
{
    sigset_t set;

    /* SIGUSR2 used by posix-aio-compat.c */
    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    return set;
}

int qemu_init_main_loop(void)
{
    int ret;
    sigset_t blocked_signals;

    qemu_init_sigbus();

    blocked_signals = block_io_signals();

    ret = qemu_signalfd_init(blocked_signals);
    if (ret) {
        return ret;
    }

    /* Note eventfd must be drained before signalfd handlers run */
    ret = qemu_event_init();
    if (ret) {
        return ret;
    }

    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_system_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);

    return 0;
}

void qemu_main_loop_start(void)
{
    qemu_system_ready = 1;
    qemu_cond_broadcast(&qemu_system_cond);
}

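/* Run func(data) in the context of the given cpu.  When called from another
 * thread, the work item is queued, the cpu is kicked, and the caller waits
 * on qemu_work_cond until the item has been processed. */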
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(env)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first) {
        env->queued_work_first = &wi;
    } else {
        env->queued_work_last->next = &wi;
    }
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}

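/* Process all work items queued on this cpu by run_on_cpu() and wake up the
 * waiters. */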
static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->queued_work_first) {
        return;
    }

    while ((wi = env->queued_work_first)) {
        env->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
    env->thread_kicked = false;
}

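/* Sleep while all vcpus are idle, briefly release the global mutex so the
 * iothread cannot be starved, then service stop requests and queued work
 * for every cpu. */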
static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (all_cpu_threads_idle()) {
        qemu_cond_timedwait(tcg_halt_cond, &qemu_global_mutex, 1000);
    }

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}

static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (cpu_thread_is_idle(env)) {
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
    }

    qemu_kvm_eat_signals(env);
    qemu_wait_io_event_common(env);
}

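/* Per-vcpu thread for KVM: create the vcpu, set up its signal mask, announce
 * creation, wait for machine init and then loop between running the vcpu and
 * servicing I/O events. */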
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(env->thread);

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready) {
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
    }

    while (1) {
        if (cpu_can_run(env)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_debug_exception(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

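/* Single thread shared by all TCG vcpus: announce creation of every cpu,
 * wait for machine init and then alternate between cpu_exec_all() and
 * waiting for I/O events. */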
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        env->created = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready) {
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);
    }

    while (1) {
        cpu_exec_all();
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

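/* Wake up a vcpu: broadcast its halt condition and, unless it was already
 * kicked, send SIG_IPI to its thread. */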
void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;

    qemu_cond_broadcast(env->halt_cond);
    if (!env->thread_kicked) {
        qemu_thread_signal(env->thread, SIG_IPI);
        env->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
    assert(cpu_single_env);

    if (!cpu_single_env->thread_kicked) {
        qemu_thread_signal(cpu_single_env->thread, SIG_IPI);
        cpu_single_env->thread_kicked = true;
    }
}

int qemu_cpu_is_self(void *_env)
{
    CPUState *env = _env;

    return qemu_thread_is_self(env->thread);
}

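/* Take the global mutex from the iothread.  With TCG, a fairness mutex and a
 * SIG_IPI kick make sure the busy vcpu thread gives up qemu_global_mutex
 * instead of starving us. */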
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        qemu_mutex_lock(&qemu_fair_mutex);
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        qemu_mutex_unlock(&qemu_fair_mutex);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped) {
            return 0;
        }
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}

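/* Request every vcpu to stop and keep kicking them until all of them report
 * stopped. */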
void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}

void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}

static void qemu_tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = qemu_mallocz(sizeof(QemuThread));
        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env);
        while (env->created == 0) {
            qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
        }
        tcg_cpu_thread = env->thread;
        tcg_halt_cond = env->halt_cond;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *env)
{
    env->thread = qemu_mallocz(sizeof(QemuThread));
    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env);
    while (env->created == 0) {
        qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
    }
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else {
        qemu_tcg_init_vcpu(env);
    }
}

void qemu_notify_event(void)
{
    qemu_event_increment();
}

void cpu_stop_current(void)
{
    if (cpu_single_env) {
        cpu_single_env->stopped = 1;
        cpu_exit(cpu_single_env);
    }
}

void vm_stop(int reason)
{
    if (!qemu_thread_is_self(&io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return;
    }
    do_vm_stop(reason);
}

#endif

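/* Run one TCG execution slice for env.  With -icount, the instruction budget
 * up to the next deadline is set up before cpu_exec() and unexecuted
 * instructions are folded back into the counter afterwards. */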
static int tcg_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_next_deadline());
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

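/* Round-robin over all cpus, running each runnable one once, until an exit
 * is requested, a debug exception stops us or an alarm is pending.  Returns
 * true if at least one cpu thread is not idle. */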
bool cpu_exec_all(void)
{
    int r;

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (qemu_alarm_pending()) {
            break;
        }
        if (cpu_can_run(env)) {
            if (kvm_enabled()) {
                r = kvm_cpu_exec(env);
                qemu_kvm_eat_signals(env);
            } else {
                r = tcg_cpu_exec(env);
            }
            if (r == EXCP_DEBUG) {
                cpu_handle_debug_exception(env);
                break;
            }
        } else if (env->stop) {
            break;
        }
    }
    exit_request = 0;
    return !all_cpu_threads_idle();
}

void set_numa_modes(void)
{
    CPUState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}

void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}

/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf);  /* deprecated */
#endif
}