/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"
#include "exec-all.h"

#include "qemu-thread.h"
#include "cpus.h"
#include "compatfd.h"
#ifdef SIGRTMIN
#define SIG_IPI (SIGRTMIN+4)
#else
#define SIG_IPI SIGUSR1
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */
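/* The vCPU that cpu_exec_all() will run next; used for round-robin
 * scheduling of the guest CPUs when they share a single host thread.  */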
static CPUState *next_cpu;

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}
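/* Apply the per-CPU synchronize hooks (which pull or push KVM register
 * state) to every CPU in the list, e.g. around state save, system reset
 * and machine init.  */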
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

int cpu_is_stopped(CPUState *env)
{
    return !vm_running || env->stopped;
}
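/* Stop the VM: freeze the virtual clock, pause every vCPU, notify
 * vm-state listeners, flush pending AIO and block devices, and emit the
 * QMP STOP event.  */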
static void do_vm_stop(int reason)
{
    if (vm_running) {
        cpu_disable_ticks();
        vm_running = 0;
        pause_all_vcpus();
        vm_state_notify(0, reason);
        qemu_aio_flush();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

static int cpu_can_run(CPUState *env)
{
    if (env->stop) {
        return 0;
    }
    if (env->stopped || !vm_running) {
        return 0;
    }
    return 1;
}
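/* A vCPU thread may go to sleep only if it has no stop or queued-work
 * request pending and either is already stopped or has nothing to run
 * (halted with no interrupt work, and the in-kernel irqchip not in
 * use).  */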
static bool cpu_thread_is_idle(CPUState *env)
{
    if (env->stop || env->queued_work_first) {
        return false;
    }
    if (env->stopped || !vm_running) {
        return true;
    }
    if (!env->halted || qemu_cpu_has_work(env) ||
        (kvm_enabled() && kvm_irqchip_in_kernel())) {
        return false;
    }
    return true;
}

bool all_cpu_threads_idle(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(env)) {
            return false;
        }
    }
    return true;
}

static void cpu_handle_guest_debug(CPUState *env)
{
    gdb_set_stop_cpu(env);
    qemu_system_debug_request();
#ifdef CONFIG_IOTHREAD
    env->stopped = 1;
#endif
}

#ifdef CONFIG_IOTHREAD
static void cpu_signal(int sig)
{
    if (cpu_single_env) {
        cpu_exit(cpu_single_env);
    }
    exit_request = 1;
}
#endif
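/* On Linux hosts, hardware memory errors are delivered to the process as
 * SIGBUS.  The handlers below hand them to KVM, which can forward an MCE
 * to the guest; if that is not possible, sigbus_reraise() restores the
 * default action and re-raises the signal so QEMU dies instead of
 * silently ignoring the error.  */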
#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
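/* Drain pending SIG_IPI/SIGBUS with a zero-timeout sigtimedwait() so that
 * signals which arrived while the thread had them blocked (for example
 * during KVM_RUN) are processed synchronously instead of being left
 * pending.  */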
static void qemu_kvm_eat_signals(CPUState *env)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));

#ifndef CONFIG_IOTHREAD
    if (sigismember(&chkset, SIGIO) || sigismember(&chkset, SIGALRM)) {
        qemu_notify_event();
    }
#endif
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *env)
{
}
#endif /* !CONFIG_LINUX */
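/* Main-loop wakeup on POSIX hosts: qemu_eventfd() returns either a real
 * eventfd or a pipe pair, so writes are always 8 bytes to keep both
 * variants compatible.  The read side is drained from the fd handler
 * installed below.  */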
#ifndef _WIN32
static int io_thread_fd = -1;

static void qemu_event_increment(void)
{
    /* Write 8 bytes to be compatible with eventfd.  */
    static const uint64_t val = 1;
    ssize_t ret;

    if (io_thread_fd == -1) {
        return;
    }
    do {
        ret = write(io_thread_fd, &val, sizeof(val));
    } while (ret < 0 && errno == EINTR);

    /* EAGAIN is fine, a read must be pending.  */
    if (ret < 0 && errno != EAGAIN) {
        fprintf(stderr, "qemu_event_increment: write() failed: %s\n",
                strerror(errno));
        exit (1);
    }
}
static void qemu_event_read(void *opaque)
{
    int fd = (intptr_t)opaque;
    ssize_t len;
    char buffer[512];

    /* Drain the notify pipe.  For eventfd, only 8 bytes will be read.  */
    do {
        len = read(fd, buffer, sizeof(buffer));
    } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
}

static int qemu_event_init(void)
{
    int err;
    int fds[2];

    err = qemu_eventfd(fds);
    if (err == -1) {
        return -errno;
    }
    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }
    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }
    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(intptr_t)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}

static void dummy_signal(int sig)
{
}
/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (intptr_t)opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN) {
            break;
        }

        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        } else if (action.sa_handler) {
            action.sa_handler(info.ssi_signo);
        }
    }
}
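/* Decide which signals are taken through signalfd.  With CONFIG_IOTHREAD,
 * SIGIO/SIGALRM/SIG_IPI/SIGBUS are blocked on all threads and dispatched
 * from the I/O thread via sigfd_handler(); without it, only SIGBUS (plus
 * the timer signals when KVM is in use) goes through signalfd.  */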
static int qemu_signal_init(void)
{
    int sigfd;
    sigset_t set;

#ifdef CONFIG_IOTHREAD
    /* SIGUSR2 used by posix-aio-compat.c */
    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_BLOCK, &set, NULL);
#else
    sigemptyset(&set);
    sigaddset(&set, SIGBUS);
    if (kvm_enabled()) {
        /*
         * We need to process timer signals synchronously to avoid a race
         * between exit_request check and KVM vcpu entry.
         */
        sigaddset(&set, SIGIO);
        sigaddset(&set, SIGALRM);
    }
#endif

    sigfd = qemu_signalfd(&set);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl_setfl(sigfd, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(intptr_t)sigfd);

    return 0;
}
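/* Per-vCPU signal setup for KVM: install a dummy SIG_IPI handler and tell
 * the kernel, via kvm_set_signal_mask(), which signals may interrupt
 * KVM_RUN for this vCPU.  */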
static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

#ifdef CONFIG_IOTHREAD
    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
#else
    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIGIO);
    sigdelset(&set, SIGALRM);
#endif
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

static void qemu_tcg_init_cpu_signals(void)
{
#ifdef CONFIG_IOTHREAD
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
#endif
}

#else /* _WIN32 */
HANDLE qemu_event_handle;

static void dummy_event_handler(void *opaque)
{
}

static int qemu_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}

static void qemu_event_increment(void)
{
    if (!SetEvent(qemu_event_handle)) {
        fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
                GetLastError());
        exit (1);
    }
}

static int qemu_signal_init(void)
{
    return 0;
}

static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */
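/* Everything below comes in two flavours.  Without CONFIG_IOTHREAD the
 * guest CPUs run inside the main-loop thread and most of these hooks are
 * stubs; with CONFIG_IOTHREAD each KVM vCPU gets its own thread and all
 * TCG CPUs share a single thread, both synchronized with the I/O thread
 * through qemu_global_mutex.  */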
#ifndef CONFIG_IOTHREAD
int qemu_init_main_loop(void)
{
    int ret;

    ret = qemu_signal_init();
    if (ret) {
        return ret;
    }

    qemu_init_sigbus();

    return qemu_event_init();
}

void qemu_main_loop_start(void)
{
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;
    int r;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;

    if (kvm_enabled()) {
        r = kvm_init_vcpu(env);
        if (r < 0) {
            fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
            exit(1);
        }
        qemu_kvm_init_cpu_signals(env);
    } else {
        qemu_tcg_init_cpu_signals();
    }
}

int qemu_cpu_is_self(void *env)
{
    return 1;
}
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    func(data);
}

void resume_all_vcpus(void)
{
}

void pause_all_vcpus(void)
{
}

void qemu_cpu_kick(void *env)
{
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);

    raise(SIG_IPI);
#else
    abort();
#endif
}

void qemu_notify_event(void)
{
    CPUState *env = cpu_single_env;

    qemu_event_increment ();
    if (env) {
        cpu_exit(env);
    }
    if (next_cpu && env != next_cpu) {
        cpu_exit(next_cpu);
    }
    exit_request = 1;
}

void qemu_mutex_lock_iothread(void) {}
void qemu_mutex_unlock_iothread(void) {}

void cpu_stop_current(void)
{
}

void vm_stop(int reason)
{
    do_vm_stop(reason);
}

#else /* CONFIG_IOTHREAD */
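/* The "big QEMU lock": vCPU threads and the I/O thread take
 * qemu_global_mutex before touching shared device state.
 * qemu_fair_mutex is only used to hand the global lock over fairly
 * between the TCG thread and the I/O thread (see qemu_tcg_wait_io_event()
 * and qemu_mutex_lock_iothread()).  */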
QemuMutex qemu_global_mutex;
static QemuMutex qemu_fair_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

static int qemu_system_ready;
/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_system_cond;
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

int qemu_init_main_loop(void)
{
    int ret;

    qemu_init_sigbus();

    ret = qemu_signal_init();
    if (ret) {
        return ret;
    }

    /* Note eventfd must be drained before signalfd handlers run */
    ret = qemu_event_init();
    if (ret) {
        return ret;
    }

    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_system_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_mutex_init(&qemu_fair_mutex);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);

    return 0;
}

void qemu_main_loop_start(void)
{
    qemu_system_ready = 1;
    qemu_cond_broadcast(&qemu_system_cond);
}
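/* run_on_cpu() queues a work item for the target vCPU and blocks,
 * dropping the global lock in qemu_cond_wait(), until that vCPU's thread
 * has executed it.  A hypothetical caller (helper name is only
 * illustrative, not part of this file) would look like:
 *
 *     static void do_flush_one(void *data)
 *     {
 *         tlb_flush((CPUState *)data, 1);
 *     }
 *     ...
 *     run_on_cpu(env, do_flush_one, env);
 */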
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(env)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first) {
        env->queued_work_first = &wi;
    } else {
        env->queued_work_last->next = &wi;
    }
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}

static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->queued_work_first) {
        return;
    }

    while ((wi = env->queued_work_first)) {
        env->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
    env->thread_kicked = false;
}

static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}

static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (cpu_thread_is_idle(env)) {
        qemu_cond_wait(env->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(env);
    qemu_wait_io_event_common(env);
}
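/* Thread body for a KVM vCPU: create the kernel vCPU, set up its signal
 * mask, announce creation, wait for machine init, then loop between
 * kvm_cpu_exec() and qemu_kvm_wait_io_event().  */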
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(env->thread);
    env->thread_id = qemu_get_thread_id();

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready) {
        qemu_cond_wait(&qemu_system_cond, &qemu_global_mutex);
    }

    while (1) {
        if (cpu_can_run(env)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        env->thread_id = qemu_get_thread_id();
        env->created = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready) {
        qemu_cond_wait(&qemu_system_cond, &qemu_global_mutex);
    }

    while (1) {
        cpu_exec_all();
        if (use_icount && qemu_next_icount_deadline() <= 0) {
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}
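/* Force a vCPU thread out of guest execution: SIG_IPI on POSIX hosts; on
 * Windows the thread is suspended, cpu_signal() is run on its behalf, and
 * it is resumed again.  */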
static void qemu_cpu_kick_thread(CPUState *env)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(env->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(env)) {
        SuspendThread(env->thread->thread);
        cpu_signal(0);
        ResumeThread(env->thread->thread);
    }
#endif
}

void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;

    qemu_cond_broadcast(env->halt_cond);
    if (!env->thread_kicked) {
        qemu_cpu_kick_thread(env);
        env->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);

    if (!cpu_single_env->thread_kicked) {
        qemu_cpu_kick_thread(cpu_single_env);
        cpu_single_env->thread_kicked = true;
    }
#else
    abort();
#endif
}

int qemu_cpu_is_self(void *_env)
{
    CPUState *env = _env;

    return qemu_thread_is_self(env->thread);
}
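/* In the TCG case the CPU thread holds qemu_global_mutex almost all the
 * time, so a plain lock could starve the I/O thread.  If trylock fails,
 * kick the CPU out of the guest first; qemu_fair_mutex keeps the CPU
 * thread from immediately grabbing the lock back.  */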
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        qemu_mutex_lock(&qemu_fair_mutex);
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        qemu_mutex_unlock(&qemu_fair_mutex);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped) {
            return 0;
        }
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}

void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}
void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}

static void qemu_tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = qemu_mallocz(sizeof(QemuThread));
        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env);
        while (env->created == 0) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = env->thread;
        tcg_halt_cond = env->halt_cond;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *env)
{
    env->thread = qemu_mallocz(sizeof(QemuThread));
    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env);
    while (env->created == 0) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else {
        qemu_tcg_init_vcpu(env);
    }
}

void qemu_notify_event(void)
{
    qemu_event_increment();
}

void cpu_stop_current(void)
{
    if (cpu_single_env) {
        cpu_single_env->stop = 0;
        cpu_single_env->stopped = 1;
        cpu_exit(cpu_single_env);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

void vm_stop(int reason)
{
    if (!qemu_thread_is_self(&io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return;
    }
    do_vm_stop(reason);
}

#endif
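/* With -icount, load the number of instructions the CPU may execute
 * before the next deadline into icount_decr.u16.low/icount_extra before
 * cpu_exec(), and fold whatever was not executed back into qemu_icount
 * afterwards.  */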
static int tcg_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_next_icount_deadline());
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag.  */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}
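/* Run every guest CPU in turn, starting from next_cpu, until the list is
 * exhausted or exit_request is raised.  Returns false when all CPU
 * threads are idle, so the caller can go to sleep.  */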
bool cpu_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock.  */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

#ifndef CONFIG_IOTHREAD
        if (qemu_alarm_pending()) {
            break;
        }
#endif
        if (cpu_can_run(env)) {
            if (kvm_enabled()) {
                r = kvm_cpu_exec(env);
                qemu_kvm_eat_signals(env);
            } else {
                r = tcg_cpu_exec(env);
            }
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            break;
        }
    }
    exit_request = 0;
    return !all_cpu_threads_idle();
}
void set_numa_modes(void)
{
    CPUState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}

void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}
/* Return the virtual CPU time, based on the instruction counter.  */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf); /* deprecated */
#endif
}