return ti;
}
-/* return the offset between the host clock and virtual CPU clock */
-int64_t cpu_get_clock_offset(void)
-{
- int64_t ti;
- unsigned start;
-
- do {
- start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
- ti = timers_state.cpu_clock_offset;
- if (!timers_state.cpu_ticks_enabled) {
- ti -= get_clock();
- }
- } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
-
- return -ti;
-}
-
/* enable cpu_get_ticks()
 * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
*/
void qtest_clock_warp(int64_t dest)
{
int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ AioContext *aio_context;
assert(qtest_enabled());
+ aio_context = qemu_get_aio_context();
while (clock < dest) {
int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
int64_t warp = qemu_soonest_timeout(dest - clock, deadline);
+
seqlock_write_lock(&timers_state.vm_clock_seqlock);
timers_state.qemu_icount_bias += warp;
seqlock_write_unlock(&timers_state.vm_clock_seqlock);
qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
+ timerlist_run_timers(aio_context->tlg.tl[QEMU_CLOCK_VIRTUAL]);
clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
static QemuMutex qemu_global_mutex;
static QemuCond qemu_io_proceeded_cond;
-static bool iothread_requesting_mutex;
+static unsigned iothread_requesting_mutex;
static QemuThread io_thread;
qemu_cond_signal(&qemu_cpu_cond);
/* wait for initial kick-off after machine start */
- while (QTAILQ_FIRST(&cpus)->stopped) {
+ while (first_cpu->stopped) {
qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
/* process any pending work */
}
}
+ /* process any pending work */
+ exit_request = 1;
+
while (1) {
tcg_exec_all();
return qemu_thread_is_self(cpu->thread);
}
-static bool qemu_in_vcpu_thread(void)
+bool qemu_in_vcpu_thread(void)
{
return current_cpu && qemu_cpu_is_self(current_cpu);
}
void qemu_mutex_lock_iothread(void)
{
- if (!tcg_enabled()) {
+ atomic_inc(&iothread_requesting_mutex);
+ if (!tcg_enabled() || !first_cpu || !first_cpu->thread) {
qemu_mutex_lock(&qemu_global_mutex);
+ atomic_dec(&iothread_requesting_mutex);
} else {
- iothread_requesting_mutex = true;
if (qemu_mutex_trylock(&qemu_global_mutex)) {
qemu_cpu_kick_thread(first_cpu);
qemu_mutex_lock(&qemu_global_mutex);
}
- iothread_requesting_mutex = false;
+ atomic_dec(&iothread_requesting_mutex);
qemu_cond_broadcast(&qemu_io_proceeded_cond);
}
}
}
ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
- qemu_time += profile_getclock() - ti;
+ tcg_time += profile_getclock() - ti;
#endif
if (use_icount) {
/* Fold pending instructions back into the
info->value->CPU = cpu->cpu_index;
info->value->current = (cpu == first_cpu);
info->value->halted = cpu->halted;
+ info->value->qom_path = object_get_canonical_path(OBJECT(cpu));
info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
info->value->has_pc = true;
uint32_t l;
CPUState *cpu;
uint8_t buf[1024];
+ int64_t orig_addr = addr, orig_size = size;
if (!has_cpu) {
cpu_index = 0;
if (l > size)
l = size;
if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
- error_setg(errp, "Invalid addr 0x%016" PRIx64 "specified", addr);
+ error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
+ " specified", orig_addr, orig_size);
goto exit;
}
if (fwrite(buf, 1, l, f) != l) {