/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"

#include "monitor.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "dma.h"
#include "kvm.h"

#include "qemu-thread.h"
#include "cpus.h"

#ifndef _WIN32
#include "compatfd.h"
#endif

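/* SIG_IPI is the signal used to kick a vCPU thread out of guest execution:
 * a spare real-time signal when the host provides one, SIGUSR1 otherwise. */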
#ifdef SIGRTMIN
#define SIG_IPI (SIGRTMIN+4)
#else
#define SIG_IPI SIGUSR1
#endif

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static CPUState *next_cpu;

/***********************************************************/
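/* Report a fatal emulation error: print the message and the state of every
 * CPU to stderr, then abort. */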
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *env;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        fprintf(stderr, "CPU #%d:\n", env->cpu_index);
#ifdef TARGET_I386
        cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
#else
        cpu_dump_state(env, stderr, fprintf, 0);
#endif
    }
    va_end(ap);
    abort();
}

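/* Apply the cpu_synchronize_* hooks to every CPU in the global list. */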
void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

int cpu_is_stopped(CPUState *env)
{
    return !runstate_is_running() || env->stopped;
}

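/* Stop the VM: pause all vCPUs, flush pending I/O and notify listeners of
 * the new run state. */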
static void do_vm_stop(RunState state)
{
    if (runstate_is_running()) {
        cpu_disable_ticks();
        pause_all_vcpus();
        runstate_set(state);
        vm_state_notify(0, state);
        qemu_aio_flush();
        bdrv_flush_all();
        monitor_protocol_event(QEVENT_STOP, NULL);
    }
}

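/* A vCPU may execute guest code only while the VM is running and no stop
 * has been requested for it. */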
static int cpu_can_run(CPUState *env)
{
    if (env->stop) {
        return 0;
    }
    if (env->stopped || !runstate_is_running()) {
        return 0;
    }
    return 1;
}

static bool cpu_thread_is_idle(CPUState *env)
{
    if (env->stop || env->queued_work_first) {
        return false;
    }
    if (env->stopped || !runstate_is_running()) {
        return true;
    }
    if (!env->halted || qemu_cpu_has_work(env) ||
        (kvm_enabled() && kvm_irqchip_in_kernel())) {
        return false;
    }
    return true;
}

bool all_cpu_threads_idle(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (!cpu_thread_is_idle(env)) {
            return false;
        }
    }
    return true;
}

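/* A vCPU hit a debug exception (e.g. a gdbstub breakpoint): record which CPU
 * stopped and ask the main loop to enter the debug state. */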
static void cpu_handle_guest_debug(CPUState *env)
{
    gdb_set_stop_cpu(env);
    qemu_system_debug_request();
    env->stopped = 1;
}

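/* SIG_IPI handler for TCG threads: force the currently executing CPU out of
 * the translated-code loop and request an exit from cpu_exec_all(). */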
static void cpu_signal(int sig)
{
    if (cpu_single_env) {
        cpu_exit(cpu_single_env);
    }
    exit_request = 1;
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        sigprocmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!\n");
    abort();
}

static void sigbus_handler(int n, struct qemu_signalfd_siginfo *siginfo,
                           void *ctx)
{
    if (kvm_on_sigbus(siginfo->ssi_code,
                      (void *)(intptr_t)siginfo->ssi_addr)) {
        sigbus_reraise();
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = (void (*)(int, siginfo_t*, void*))sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}

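/* Drain any SIG_IPI and SIGBUS signals already pending for this thread
 * without blocking; SIGBUS events are handed to KVM as machine-check
 * notifications. */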
static void qemu_kvm_eat_signals(CPUState *env)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);
    sigaddset(&waitset, SIGBUS);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        switch (r) {
        case SIGBUS:
            if (kvm_on_sigbus_vcpu(env, siginfo.si_code, siginfo.si_addr)) {
                sigbus_reraise();
            }
            break;
        default:
            break;
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI) || sigismember(&chkset, SIGBUS));
}

#else /* !CONFIG_LINUX */

static void qemu_init_sigbus(void)
{
}

static void qemu_kvm_eat_signals(CPUState *env)
{
}
#endif /* !CONFIG_LINUX */

#ifndef _WIN32
static int io_thread_fd = -1;

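/* Wake up the main loop by writing a token to the notification pipe (or
 * eventfd). */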
static void qemu_event_increment(void)
{
    /* Write 8 bytes to be compatible with eventfd. */
    static const uint64_t val = 1;
    ssize_t ret;

    if (io_thread_fd == -1) {
        return;
    }
    do {
        ret = write(io_thread_fd, &val, sizeof(val));
    } while (ret < 0 && errno == EINTR);

    /* EAGAIN is fine, a read must be pending. */
    if (ret < 0 && errno != EAGAIN) {
        fprintf(stderr, "qemu_event_increment: write() failed: %s\n",
                strerror(errno));
        exit (1);
    }
}

static void qemu_event_read(void *opaque)
{
    int fd = (intptr_t)opaque;
    ssize_t len;
    char buffer[512];

    /* Drain the notify pipe. For eventfd, only 8 bytes will be read. */
    do {
        len = read(fd, buffer, sizeof(buffer));
    } while ((len == -1 && errno == EINTR) || len == sizeof(buffer));
}

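/* Create the main-loop notification channel via qemu_eventfd(), make both
 * ends non-blocking, and register the read end as an fd handler; the write
 * end is stored in io_thread_fd for qemu_event_increment(). */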
static int qemu_event_init(void)
{
    int err;
    int fds[2];

    err = qemu_eventfd(fds);
    if (err == -1) {
        return -errno;
    }
    err = fcntl_setfl(fds[0], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }
    err = fcntl_setfl(fds[1], O_NONBLOCK);
    if (err < 0) {
        goto fail;
    }
    qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
                         (void *)(intptr_t)fds[0]);

    io_thread_fd = fds[1];
    return 0;

fail:
    close(fds[0]);
    close(fds[1]);
    return err;
}

static void dummy_signal(int sig)
{
}

/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them. We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (intptr_t)opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN) {
            break;
        }

        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        } else if (action.sa_handler) {
            action.sa_handler(info.ssi_signo);
        }
    }
}

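/* Route SIGIO, SIGALRM and SIGBUS through a signalfd serviced by the main
 * loop, while keeping SIG_IPI blocked here so that only vCPU threads ever
 * receive it. */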
static int qemu_signal_init(void)
{
    int sigfd;
    sigset_t set;

    /*
     * SIG_IPI must be blocked in the main thread and must not be caught
     * by sigwait() in the signal thread. Otherwise, the cpu thread will
     * not catch it reliably.
     */
    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGBUS);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigfd = qemu_signalfd(&set);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl_setfl(sigfd, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(intptr_t)sigfd);

    return 0;
}

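/* Per-vCPU signal setup for KVM: install a dummy SIG_IPI handler and pass the
 * thread's signal mask, minus SIG_IPI and SIGBUS, to kvm_set_signal_mask() so
 * those signals can be delivered while the vCPU runs. */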
static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);
    sigdelset(&set, SIGBUS);
    r = kvm_set_signal_mask(env, &set);
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

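/* Per-thread signal setup for TCG: install cpu_signal() as the SIG_IPI
 * handler and unblock SIG_IPI in this thread. */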
static void qemu_tcg_init_cpu_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}

#else /* _WIN32 */

HANDLE qemu_event_handle;

static void dummy_event_handler(void *opaque)
{
}

static int qemu_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        fprintf(stderr, "Failed CreateEvent: %ld\n", GetLastError());
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}

static void qemu_event_increment(void)
{
    if (!SetEvent(qemu_event_handle)) {
        fprintf(stderr, "qemu_event_increment: SetEvent failed: %ld\n",
                GetLastError());
        exit (1);
    }
}

static int qemu_signal_init(void)
{
    return 0;
}

static void qemu_kvm_init_cpu_signals(CPUState *env)
{
    abort();
}

static void qemu_tcg_init_cpu_signals(void)
{
}
#endif /* _WIN32 */

QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
static bool iothread_requesting_mutex;

static QemuThread io_thread;

static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;
static QemuCond qemu_work_cond;

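/* One-time main loop setup: SIGBUS/MCE handling, the signalfd, the wakeup
 * event channel, and the global mutex and condition variables. On return the
 * calling (I/O) thread holds qemu_global_mutex. */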
int qemu_init_main_loop(void)
{
    int ret;

    qemu_init_sigbus();

    ret = qemu_signal_init();
    if (ret) {
        return ret;
    }

    /* Note eventfd must be drained before signalfd handlers run */
    ret = qemu_event_init();
    if (ret) {
        return ret;
    }

    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_cond_init(&qemu_work_cond);
    qemu_cond_init(&qemu_io_proceeded_cond);
    qemu_mutex_init(&qemu_global_mutex);
    qemu_mutex_lock(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);

    return 0;
}

void qemu_main_loop_start(void)
{
    resume_all_vcpus();
}

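/* Run func(data) in the context of the given vCPU. When called from that
 * vCPU's own thread the function runs immediately; otherwise it is queued as
 * a work item, the vCPU is kicked, and the caller sleeps on qemu_work_cond
 * until the item has been processed. */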
void run_on_cpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(env)) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->queued_work_first) {
        env->queued_work_first = &wi;
    } else {
        env->queued_work_last->next = &wi;
    }
    env->queued_work_last = &wi;
    wi.next = NULL;
    wi.done = false;

    qemu_cpu_kick(env);
    while (!wi.done) {
        CPUState *self_env = cpu_single_env;

        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        cpu_single_env = self_env;
    }
}

static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->queued_work_first) {
        return;
    }

    while ((wi = env->queued_work_first)) {
        env->queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = true;
    }
    env->queued_work_last = NULL;
    qemu_cond_broadcast(&qemu_work_cond);
}

static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
    flush_queued_work(env);
    env->thread_kicked = false;
}

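/* TCG thread idle loop: sleep on the halt condition while every vCPU is idle,
 * give way to an I/O thread that is waiting for the global mutex, then handle
 * stop requests and queued work for all vCPUs. */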
static void qemu_tcg_wait_io_event(void)
{
    CPUState *env;

    while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle. */
        qemu_clock_warp(vm_clock);
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (iothread_requesting_mutex) {
        qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        qemu_wait_io_event_common(env);
    }
}

static void qemu_kvm_wait_io_event(CPUState *env)
{
    while (cpu_thread_is_idle(env)) {
        qemu_cond_wait(env->halt_cond, &qemu_global_mutex);
    }

    qemu_kvm_eat_signals(env);
    qemu_wait_io_event_common(env);
}

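/* Thread function for a KVM vCPU: create the in-kernel vCPU, set up its
 * signal mask, announce its creation, then loop between running guest code
 * and servicing I/O events. */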
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;
    int r;

    qemu_mutex_lock(&qemu_global_mutex);
    qemu_thread_get_self(env->thread);
    env->thread_id = qemu_get_thread_id();

    r = kvm_init_vcpu(env);
    if (r < 0) {
        fprintf(stderr, "kvm_init_vcpu failed: %s\n", strerror(-r));
        exit(1);
    }

    qemu_kvm_init_cpu_signals(env);

    /* signal CPU creation */
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    while (1) {
        if (cpu_can_run(env)) {
            r = kvm_cpu_exec(env);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
            }
        }
        qemu_kvm_wait_io_event(env);
    }

    return NULL;
}

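/* Thread function for TCG: a single thread executes all vCPUs round-robin.
 * It waits for the machine to start, then alternates between cpu_exec_all()
 * and the idle/event-handling path. */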
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    qemu_tcg_init_cpu_signals();
    qemu_thread_get_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        env->thread_id = qemu_get_thread_id();
        env->created = 1;
    }
    qemu_cond_signal(&qemu_cpu_cond);

    /* wait for initial kick-off after machine start */
    while (first_cpu->stopped) {
        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
    }

    while (1) {
        cpu_exec_all();
        if (use_icount && qemu_next_icount_deadline() <= 0) {
            qemu_notify_event();
        }
        qemu_tcg_wait_io_event();
    }

    return NULL;
}

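/* Force a vCPU thread out of guest execution: send it SIG_IPI on POSIX hosts;
 * on Windows, suspend the thread, raise the exit request by calling
 * cpu_signal() directly, and resume it. */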
static void qemu_cpu_kick_thread(CPUState *env)
{
#ifndef _WIN32
    int err;

    err = pthread_kill(env->thread->thread, SIG_IPI);
    if (err) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else /* _WIN32 */
    if (!qemu_cpu_is_self(env)) {
        SuspendThread(env->thread->thread);
        cpu_signal(0);
        ResumeThread(env->thread->thread);
    }
#endif
}

void qemu_cpu_kick(void *_env)
{
    CPUState *env = _env;

    qemu_cond_broadcast(env->halt_cond);
    if (kvm_enabled() && !env->thread_kicked) {
        qemu_cpu_kick_thread(env);
        env->thread_kicked = true;
    }
}

void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
    assert(cpu_single_env);

    if (!cpu_single_env->thread_kicked) {
        qemu_cpu_kick_thread(cpu_single_env);
        cpu_single_env->thread_kicked = true;
    }
#else
    abort();
#endif
}

int qemu_cpu_is_self(void *_env)
{
    CPUState *env = _env;

    return qemu_thread_is_self(env->thread);
}

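/* Acquire the global mutex on behalf of the I/O thread. With TCG the vCPU
 * thread can hold the lock for a long time, so flag the request and kick the
 * CPU to make it drop the lock sooner. */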
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
    } else {
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            qemu_cpu_kick_thread(first_cpu);
            qemu_mutex_lock(&qemu_global_mutex);
        }
        iothread_requesting_mutex = false;
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
    }
}

void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}

static int all_vcpus_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (!penv->stopped) {
            return 0;
        }
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}

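/* Request every vCPU to stop and keep kicking them until all of them have
 * acknowledged the request; called with the global mutex held. */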
void pause_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 1;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        penv = first_cpu;
        while (penv) {
            qemu_cpu_kick(penv);
            penv = (CPUState *)penv->next_cpu;
        }
    }
}

void resume_all_vcpus(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        penv->stop = 0;
        penv->stopped = 0;
        qemu_cpu_kick(penv);
        penv = (CPUState *)penv->next_cpu;
    }
}

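/* All TCG vCPUs share a single thread and halt condition; they are created on
 * the first call and reused for every subsequent CPU. */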
static void qemu_tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;

    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = g_malloc0(sizeof(QemuThread));
        env->halt_cond = g_malloc0(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        tcg_halt_cond = env->halt_cond;
        qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env);
        while (env->created == 0) {
            qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
        }
        tcg_cpu_thread = env->thread;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}

static void qemu_kvm_start_vcpu(CPUState *env)
{
    env->thread = g_malloc0(sizeof(QemuThread));
    env->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env);
    while (env->created == 0) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void qemu_init_vcpu(void *_env)
{
    CPUState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    env->stopped = 1;
    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else {
        qemu_tcg_init_vcpu(env);
    }
}

void qemu_notify_event(void)
{
    qemu_event_increment();
}

void cpu_stop_current(void)
{
    if (cpu_single_env) {
        cpu_single_env->stop = 0;
        cpu_single_env->stopped = 1;
        cpu_exit(cpu_single_env);
        qemu_cond_signal(&qemu_pause_cond);
    }
}

void vm_stop(RunState state)
{
    if (!qemu_thread_is_self(&io_thread)) {
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return;
    }
    do_vm_stop(state);
}

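/* Execute guest code for one vCPU under TCG. In icount mode the CPU gets an
 * instruction budget derived from the next timer deadline before cpu_exec(),
 * and any unused part of the budget is folded back afterwards. */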
static int tcg_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_icount_round(qemu_next_icount_deadline());
        qemu_icount += count;
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}

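/* Run every runnable vCPU once in round-robin order. The loop stops early on
 * a debug exception or a stop request; the return value tells the caller
 * whether any vCPU thread still has work to do. */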
bool cpu_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock. */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

        if (cpu_can_run(env)) {
            if (kvm_enabled()) {
                r = kvm_cpu_exec(env);
                qemu_kvm_eat_signals(env);
            } else {
                r = tcg_cpu_exec(env);
            }
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            break;
        }
    }
    exit_request = 0;
    return !all_cpu_threads_idle();
}

void set_numa_modes(void)
{
    CPUState *env;
    int i;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        for (i = 0; i < nb_numa_nodes; i++) {
            if (node_cpumask[i] & (1 << env->cpu_index)) {
                env->numa_node = i;
            }
        }
    }
}

void set_cpu_log(const char *optarg)
{
    int mask;
    const CPULogItem *item;

    mask = cpu_str_to_log_mask(optarg);
    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}

void set_cpu_log_filename(const char *optarg)
{
    cpu_set_log_filename(optarg);
}

/* Return the virtual CPU time, based on the instruction counter. */
int64_t cpu_get_icount(void)
{
    int64_t icount;
    CPUState *env = cpu_single_env;

    icount = qemu_icount;
    if (env) {
        if (!can_do_io(env)) {
            fprintf(stderr, "Bad clock read\n");
        }
        icount -= (env->icount_decr.u16.low + env->icount_extra);
    }
    return qemu_icount_bias + (icount << icount_time_shift);
}

void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    /* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
    cpu_list_id(f, cpu_fprintf, optarg);
#elif defined(cpu_list)
    cpu_list(f, cpu_fprintf); /* deprecated */
#endif
}