#include <sys/param.h>
#endif
-#ifdef __linux__
-#include <sys/ioctl.h>
-#include <linux/rtc.h>
-/* For the benefit of older linux systems which don't supply it,
- we use a local copy of hpet.h. */
-/* #include <linux/hpet.h> */
-#include "hpet.h"
-#endif
-
#ifdef _WIN32
#include <windows.h>
#include <mmsystem.h>
}
}
+#ifndef CONFIG_IOTHREAD
static int64_t qemu_icount_delta(void)
{
- if (use_icount == 1) {
+ if (!use_icount) {
+ return 5000 * (int64_t) 1000000;
+ } else if (use_icount == 1) {
/* When not using an adaptive execution frequency
we tend to get badly out of sync with real time,
so just delay for a reasonable amount of time. */
+        return 0;
+    } else {
        return cpu_get_icount() - cpu_get_clock();
}
}
+#endif
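
(For reference: use_icount is 0 when icount is disabled, 1 when a fixed
icount_time_shift was given on the command line, and 2 in the adaptive
"auto" mode; 5000 * 1000000 ns is the same five-second fallback that
qemu_calculate_timeout() at the end of this patch uses while the VM is
stopped.)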
/* enable cpu_get_ticks() */
void cpu_enable_ticks(void)
struct QEMUClock {
int type;
int enabled;
- /* XXX: add frequency */
+
+ QEMUTimer *warp_timer;
+
+ NotifierList reset_notifiers;
+ int64_t last;
};
struct QEMUTimer {
QEMUClock *clock;
- int64_t expire_time;
+ int64_t expire_time; /* in nanoseconds */
+ int scale;
QEMUTimerCB *cb;
void *opaque;
struct QEMUTimer *next;
int (*start)(struct qemu_alarm_timer *t);
void (*stop)(struct qemu_alarm_timer *t);
void (*rearm)(struct qemu_alarm_timer *t);
- void *priv;
-
+#if defined(__linux__)
+ int fd;
+ timer_t timer;
+#elif defined(_WIN32)
+ HANDLE timer;
+#endif
char expired;
char pending;
};
static struct qemu_alarm_timer *alarm_timer;
+static bool qemu_timer_expired_ns(QEMUTimer *timer_head, int64_t current_time)
+{
+ return timer_head && (timer_head->expire_time <= current_time);
+}
+
int qemu_alarm_pending(void)
{
return alarm_timer->pending;
#ifdef _WIN32
+static int mm_start_timer(struct qemu_alarm_timer *t);
+static void mm_stop_timer(struct qemu_alarm_timer *t);
+static void mm_rearm_timer(struct qemu_alarm_timer *t);
+
static int win32_start_timer(struct qemu_alarm_timer *t);
static void win32_stop_timer(struct qemu_alarm_timer *t);
static void win32_rearm_timer(struct qemu_alarm_timer *t);
static int unix_start_timer(struct qemu_alarm_timer *t);
static void unix_stop_timer(struct qemu_alarm_timer *t);
+static void unix_rearm_timer(struct qemu_alarm_timer *t);
#ifdef __linux__
static void dynticks_stop_timer(struct qemu_alarm_timer *t);
static void dynticks_rearm_timer(struct qemu_alarm_timer *t);
-static int hpet_start_timer(struct qemu_alarm_timer *t);
-static void hpet_stop_timer(struct qemu_alarm_timer *t);
-
-static int rtc_start_timer(struct qemu_alarm_timer *t);
-static void rtc_stop_timer(struct qemu_alarm_timer *t);
-
#endif /* __linux__ */
#endif /* _WIN32 */
#ifndef _WIN32
#ifdef __linux__
{"dynticks", dynticks_start_timer,
- dynticks_stop_timer, dynticks_rearm_timer, NULL},
- /* HPET - if available - is preferred */
- {"hpet", hpet_start_timer, hpet_stop_timer, NULL, NULL},
- /* ...otherwise try RTC */
- {"rtc", rtc_start_timer, rtc_stop_timer, NULL, NULL},
+ dynticks_stop_timer, dynticks_rearm_timer},
#endif
- {"unix", unix_start_timer, unix_stop_timer, NULL, NULL},
+ {"unix", unix_start_timer, unix_stop_timer, unix_rearm_timer},
#else
- {"dynticks", win32_start_timer,
- win32_stop_timer, win32_rearm_timer, NULL},
- {"win32", win32_start_timer,
- win32_stop_timer, NULL, NULL},
+ {"mmtimer", mm_start_timer, mm_stop_timer, NULL},
+ {"mmtimer2", mm_start_timer, mm_stop_timer, mm_rearm_timer},
+ {"dynticks", win32_start_timer, win32_stop_timer, win32_rearm_timer},
+ {"win32", win32_start_timer, win32_stop_timer, NULL},
#endif
{NULL, }
};
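
Array order encodes preference: at startup the first entry whose start()
callback succeeds becomes the active alarm timer (the -clock option can
override this). A minimal sketch of that selection loop, roughly how
init_timer_alarm() picks a timer (not part of this hunk):

    struct qemu_alarm_timer *t = NULL;
    int i;

    for (i = 0; alarm_timers[i].name; i++) {
        t = &alarm_timers[i];
        if (t->start(t) == 0) {
            break;              /* first timer that starts wins */
        }
    }
    alarm_timer = t;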
static QEMUClock *qemu_new_clock(int type)
{
QEMUClock *clock;
+
clock = qemu_mallocz(sizeof(QEMUClock));
clock->type = type;
clock->enabled = 1;
+ notifier_list_init(&clock->reset_notifiers);
+ /* required to detect & report backward jumps */
+ if (type == QEMU_CLOCK_HOST) {
+ clock->last = get_clock_realtime();
+ }
return clock;
}
clock->enabled = enabled;
}
-QEMUTimer *qemu_new_timer(QEMUClock *clock, QEMUTimerCB *cb, void *opaque)
+static int64_t vm_clock_warp_start;
+
+static void icount_warp_rt(void *opaque)
+{
+ if (vm_clock_warp_start == -1) {
+ return;
+ }
+
+ if (vm_running) {
+ int64_t clock = qemu_get_clock_ns(rt_clock);
+ int64_t warp_delta = clock - vm_clock_warp_start;
+ if (use_icount == 1) {
+ qemu_icount_bias += warp_delta;
+ } else {
+ /*
+ * In adaptive mode, do not let the vm_clock run too
+ * far ahead of real time.
+ */
+ int64_t cur_time = cpu_get_clock();
+ int64_t cur_icount = qemu_get_clock_ns(vm_clock);
+ int64_t delta = cur_time - cur_icount;
+ qemu_icount_bias += MIN(warp_delta, delta);
+ }
+ if (qemu_timer_expired(active_timers[QEMU_CLOCK_VIRTUAL],
+ qemu_get_clock_ns(vm_clock))) {
+ qemu_notify_event();
+ }
+ }
+ vm_clock_warp_start = -1;
+}
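
A worked example of the adaptive branch: if 10 ms of real time passed
while the CPUs slept (warp_delta) but vm_clock only trails real time by
2 ms (delta), the bias grows by MIN(10 ms, 2 ms) = 2 ms, so a warp can
never push virtual time ahead of real time.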
+
+void qemu_clock_warp(QEMUClock *clock)
+{
+ int64_t deadline;
+
+ if (!clock->warp_timer) {
+ return;
+ }
+
+ /*
+ * There are too many global variables to make the "warp" behavior
+ * applicable to other clocks. But a clock argument removes the
+ * need for if statements all over the place.
+ */
+ assert(clock == vm_clock);
+
+ /*
+ * If the CPUs have been sleeping, advance the vm_clock timer now. This
+ * ensures that the deadline for the timer is computed correctly below.
+ * This also makes sure that the insn counter is synchronized before the
+ * CPU starts running, in case the CPU is woken by an event other than
+ * the earliest vm_clock timer.
+ */
+ icount_warp_rt(NULL);
+ if (!all_cpu_threads_idle() || !active_timers[clock->type]) {
+ qemu_del_timer(clock->warp_timer);
+ return;
+ }
+
+ vm_clock_warp_start = qemu_get_clock_ns(rt_clock);
+ deadline = qemu_next_icount_deadline();
+ if (deadline > 0) {
+ /*
+ * Ensure the vm_clock proceeds even when the virtual CPU goes to
+ * sleep. Otherwise, the CPU might be waiting for a future timer
+ * interrupt to wake it up, but the interrupt never comes because
+ * the vCPU isn't running any insns and thus doesn't advance the
+ * vm_clock.
+ *
+ * An extreme solution for this problem would be to never let VCPUs
+ * sleep in icount mode if there is a pending vm_clock timer; rather
+ * time could just advance to the next vm_clock event. Instead, we
+     * do stop VCPUs and only advance vm_clock after some "real" time
+     * (related to the time left until the next event) has passed. This
+     * rt_clock timer will do this. This keeps the warps from being too
+     * visible externally; for example, you will not be sending network
+     * packets continuously instead of every 100 ms.
+ */
+ qemu_mod_timer(clock->warp_timer, vm_clock_warp_start + deadline);
+ } else {
+ qemu_notify_event();
+ }
+}
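
The warp itself is triggered from the CPU loop, outside this file; a
sketch of the expected call site (the exact location in cpus.c is not
part of this hunk):

    /* before the VCPU threads go idle waiting for an event */
    qemu_clock_warp(vm_clock);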
+
+QEMUTimer *qemu_new_timer(QEMUClock *clock, int scale,
+ QEMUTimerCB *cb, void *opaque)
{
QEMUTimer *ts;
ts->clock = clock;
ts->cb = cb;
ts->opaque = opaque;
+ ts->scale = scale;
return ts;
}
/* modify the current timer so that it will be fired when current_time
>= expire_time. The corresponding callback will be called. */
-void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time)
+static void qemu_mod_timer_ns(QEMUTimer *ts, int64_t expire_time)
{
QEMUTimer **pt, *t;
pt = &active_timers[ts->clock->type];
for(;;) {
t = *pt;
- if (!t)
- break;
- if (t->expire_time > expire_time)
+ if (!qemu_timer_expired_ns(t, expire_time)) {
break;
+ }
pt = &t->next;
}
ts->expire_time = expire_time;
qemu_rearm_alarm_timer(alarm_timer);
}
/* Interrupt execution to force deadline recalculation. */
- if (use_icount)
+ qemu_clock_warp(ts->clock);
+ if (use_icount) {
qemu_notify_event();
+ }
}
}
+/* modify the current timer so that it will be fired when current_time
+ >= expire_time. The corresponding callback will be called. */
+void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time)
+{
+ qemu_mod_timer_ns(ts, expire_time * ts->scale);
+}
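
qemu_new_timer_ns(), used below to create the warp timer, is presumably a
thin inline wrapper over the new scale argument; something along these
lines, with the SCALE_* constants inferred from the nanosecond bookkeeping
above:

    #define SCALE_MS 1000000
    #define SCALE_US 1000
    #define SCALE_NS 1

    static inline QEMUTimer *qemu_new_timer_ns(QEMUClock *clock,
                                               QEMUTimerCB *cb, void *opaque)
    {
        return qemu_new_timer(clock, SCALE_NS, cb, opaque);
    }

    static inline QEMUTimer *qemu_new_timer_ms(QEMUClock *clock,
                                               QEMUTimerCB *cb, void *opaque)
    {
        return qemu_new_timer(clock, SCALE_MS, cb, opaque);
    }

A timer created with SCALE_MS keeps taking millisecond deadlines in
qemu_mod_timer(); the multiplication by ts->scale converts them to the
nanosecond expire_time stored in the list.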
+
int qemu_timer_pending(QEMUTimer *ts)
{
QEMUTimer *t;
int qemu_timer_expired(QEMUTimer *timer_head, int64_t current_time)
{
- if (!timer_head)
- return 0;
- return (timer_head->expire_time <= current_time);
+ return qemu_timer_expired_ns(timer_head, current_time * timer_head->scale);
}
static void qemu_run_timers(QEMUClock *clock)
if (!clock->enabled)
return;
- current_time = qemu_get_clock (clock);
+ current_time = qemu_get_clock_ns(clock);
ptimer_head = &active_timers[clock->type];
for(;;) {
ts = *ptimer_head;
- if (!ts || ts->expire_time > current_time)
+ if (!qemu_timer_expired_ns(ts, current_time)) {
break;
+ }
/* remove timer from the list before calling the callback */
*ptimer_head = ts->next;
ts->next = NULL;
}
}
-int64_t qemu_get_clock(QEMUClock *clock)
+int64_t qemu_get_clock_ns(QEMUClock *clock)
{
+ int64_t now, last;
+
switch(clock->type) {
case QEMU_CLOCK_REALTIME:
- return get_clock() / 1000000;
+ return get_clock();
default:
case QEMU_CLOCK_VIRTUAL:
        if (use_icount) {
            return cpu_get_icount();
        } else {
            return cpu_get_clock();
        }
case QEMU_CLOCK_HOST:
- return get_clock_realtime();
+ now = get_clock_realtime();
+ last = clock->last;
+ clock->last = now;
+ if (now < last) {
+ notifier_list_notify(&clock->reset_notifiers, &now);
+ }
+ return now;
}
}
-int64_t qemu_get_clock_ns(QEMUClock *clock)
+void qemu_register_clock_reset_notifier(QEMUClock *clock, Notifier *notifier)
{
- switch(clock->type) {
- case QEMU_CLOCK_REALTIME:
- return get_clock();
- default:
- case QEMU_CLOCK_VIRTUAL:
- if (use_icount) {
- return cpu_get_icount();
- } else {
- return cpu_get_clock();
- }
- case QEMU_CLOCK_HOST:
- return get_clock_realtime();
- }
+ notifier_list_add(&clock->reset_notifiers, notifier);
+}
+
+void qemu_unregister_clock_reset_notifier(QEMUClock *clock, Notifier *notifier)
+{
+ notifier_list_remove(&clock->reset_notifiers, notifier);
}
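
A consumer that must react to the host clock jumping backwards (for
example a device that latched the old host time) hangs a Notifier on
host_clock. A sketch, assuming the two-argument notify callback implied
by the notifier_list_notify(&clock->reset_notifiers, &now) call above:

    static void host_clock_jumped(Notifier *notifier, void *data)
    {
        int64_t now = *(int64_t *)data;  /* the new host time, in ns */
        /* re-base any state derived from the old host time */
    }

    static Notifier clock_reset_notifier = {
        .notify = host_clock_jumped,
    };

    /* during device init */
    qemu_register_clock_reset_notifier(host_clock, &clock_reset_notifier);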
void init_clocks(void)
expire_time = qemu_get_be64(f);
if (expire_time != -1) {
- qemu_mod_timer(ts, expire_time);
+ qemu_mod_timer_ns(ts, expire_time);
} else {
qemu_del_timer(ts);
}
if (!option)
return;
+#ifdef CONFIG_IOTHREAD
+ vm_clock->warp_timer = qemu_new_timer_ns(rt_clock, icount_warp_rt, NULL);
+#endif
+
if (strcmp(option, "auto") != 0) {
icount_time_shift = strtol(option, NULL, 0);
use_icount = 1;
}
}
-int64_t qemu_next_deadline(void)
+int64_t qemu_next_icount_deadline(void)
{
    /* To avoid problems with overflow limit this to INT32_MAX. */
int64_t delta = INT32_MAX;
+ assert(use_icount);
if (active_timers[QEMU_CLOCK_VIRTUAL]) {
delta = active_timers[QEMU_CLOCK_VIRTUAL]->expire_time -
qemu_get_clock_ns(vm_clock);
}
- if (active_timers[QEMU_CLOCK_HOST]) {
- int64_t hdelta = active_timers[QEMU_CLOCK_HOST]->expire_time -
- qemu_get_clock_ns(host_clock);
- if (hdelta < delta)
- delta = hdelta;
- }
if (delta < 0)
delta = 0;
delta = hdelta;
}
if (active_timers[QEMU_CLOCK_REALTIME]) {
- rtdelta = (active_timers[QEMU_CLOCK_REALTIME]->expire_time * 1000000 -
+ rtdelta = (active_timers[QEMU_CLOCK_REALTIME]->expire_time -
qemu_get_clock_ns(rt_clock));
if (rtdelta < delta)
delta = rtdelta;
#if defined(__linux__)
-#define RTC_FREQ 1024
-
-static void enable_sigio_timer(int fd)
-{
- struct sigaction act;
-
- /* timer signal */
- sigfillset(&act.sa_mask);
- act.sa_flags = 0;
- act.sa_handler = host_alarm_handler;
-
- sigaction(SIGIO, &act, NULL);
- fcntl_setfl(fd, O_ASYNC);
- fcntl(fd, F_SETOWN, getpid());
-}
-
-static int hpet_start_timer(struct qemu_alarm_timer *t)
-{
- struct hpet_info info;
- int r, fd;
-
- fd = qemu_open("/dev/hpet", O_RDONLY);
- if (fd < 0)
- return -1;
-
- /* Set frequency */
- r = ioctl(fd, HPET_IRQFREQ, RTC_FREQ);
- if (r < 0) {
- fprintf(stderr, "Could not configure '/dev/hpet' to have a 1024Hz timer. This is not a fatal\n"
- "error, but for better emulation accuracy type:\n"
- "'echo 1024 > /proc/sys/dev/hpet/max-user-freq' as root.\n");
- goto fail;
- }
-
- /* Check capabilities */
- r = ioctl(fd, HPET_INFO, &info);
- if (r < 0)
- goto fail;
-
- /* Enable periodic mode */
- r = ioctl(fd, HPET_EPI, 0);
- if (info.hi_flags && (r < 0))
- goto fail;
-
- /* Enable interrupt */
- r = ioctl(fd, HPET_IE_ON, 0);
- if (r < 0)
- goto fail;
-
- enable_sigio_timer(fd);
- t->priv = (void *)(long)fd;
-
- return 0;
-fail:
- close(fd);
- return -1;
-}
-
-static void hpet_stop_timer(struct qemu_alarm_timer *t)
-{
- int fd = (long)t->priv;
-
- close(fd);
-}
-
-static int rtc_start_timer(struct qemu_alarm_timer *t)
-{
- int rtc_fd;
- unsigned long current_rtc_freq = 0;
-
- TFR(rtc_fd = qemu_open("/dev/rtc", O_RDONLY));
- if (rtc_fd < 0)
- return -1;
-    ioctl(rtc_fd, RTC_IRQP_READ, &current_rtc_freq);
- if (current_rtc_freq != RTC_FREQ &&
- ioctl(rtc_fd, RTC_IRQP_SET, RTC_FREQ) < 0) {
- fprintf(stderr, "Could not configure '/dev/rtc' to have a 1024 Hz timer. This is not a fatal\n"
- "error, but for better emulation accuracy either use a 2.6 host Linux kernel or\n"
- "type 'echo 1024 > /proc/sys/dev/rtc/max-user-freq' as root.\n");
- goto fail;
- }
- if (ioctl(rtc_fd, RTC_PIE_ON, 0) < 0) {
- fail:
- close(rtc_fd);
- return -1;
- }
-
- enable_sigio_timer(rtc_fd);
-
- t->priv = (void *)(long)rtc_fd;
-
- return 0;
-}
-
-static void rtc_stop_timer(struct qemu_alarm_timer *t)
-{
- int rtc_fd = (long)t->priv;
-
- close(rtc_fd);
-}
+#include "compatfd.h"
static int dynticks_start_timer(struct qemu_alarm_timer *t)
{
memset(&ev, 0, sizeof(ev));
ev.sigev_value.sival_int = 0;
ev.sigev_notify = SIGEV_SIGNAL;
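+    /* Where available, ask the kernel to deliver SIGALRM to this specific
+       thread, so that with signalfd the io-thread dequeues the signal
+       instead of it interrupting an arbitrary thread. */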
+#ifdef SIGEV_THREAD_ID
+ if (qemu_signalfd_available()) {
+ ev.sigev_notify = SIGEV_THREAD_ID;
+ ev._sigev_un._tid = qemu_get_thread_id();
+ }
+#endif /* SIGEV_THREAD_ID */
ev.sigev_signo = SIGALRM;
if (timer_create(CLOCK_REALTIME, &ev, &host_timer)) {
return -1;
}
- t->priv = (void *)(long)host_timer;
+ t->timer = host_timer;
return 0;
}
static void dynticks_stop_timer(struct qemu_alarm_timer *t)
{
- timer_t host_timer = (timer_t)(long)t->priv;
+ timer_t host_timer = t->timer;
timer_delete(host_timer);
}
static void dynticks_rearm_timer(struct qemu_alarm_timer *t)
{
- timer_t host_timer = (timer_t)(long)t->priv;
+ timer_t host_timer = t->timer;
struct itimerspec timeout;
int64_t nearest_delta_ns = INT64_MAX;
int64_t current_ns;
static int unix_start_timer(struct qemu_alarm_timer *t)
{
struct sigaction act;
- struct itimerval itv;
- int err;
/* timer signal */
sigfillset(&act.sa_mask);
act.sa_handler = host_alarm_handler;
sigaction(SIGALRM, &act, NULL);
+ return 0;
+}
- itv.it_interval.tv_sec = 0;
- /* for i386 kernel 2.6 to get 1 ms */
- itv.it_interval.tv_usec = 999;
- itv.it_value.tv_sec = 0;
- itv.it_value.tv_usec = 10 * 1000;
+static void unix_rearm_timer(struct qemu_alarm_timer *t)
+{
+ struct itimerval itv;
+ int64_t nearest_delta_ns = INT64_MAX;
+ int err;
- err = setitimer(ITIMER_REAL, &itv, NULL);
- if (err)
- return -1;
+ assert(alarm_has_dynticks(t));
+ if (!active_timers[QEMU_CLOCK_REALTIME] &&
+ !active_timers[QEMU_CLOCK_VIRTUAL] &&
+ !active_timers[QEMU_CLOCK_HOST])
+ return;
- return 0;
+ nearest_delta_ns = qemu_next_alarm_deadline();
+ if (nearest_delta_ns < MIN_TIMER_REARM_NS)
+ nearest_delta_ns = MIN_TIMER_REARM_NS;
+
+ itv.it_interval.tv_sec = 0;
+ itv.it_interval.tv_usec = 0; /* 0 for one-shot timer */
+ itv.it_value.tv_sec = nearest_delta_ns / 1000000000;
+ itv.it_value.tv_usec = (nearest_delta_ns % 1000000000) / 1000;
+ err = setitimer(ITIMER_REAL, &itv, NULL);
+ if (err) {
+ perror("setitimer");
+ fprintf(stderr, "Internal timer error: aborting\n");
+ exit(1);
+ }
}
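
The nanosecond-to-itimerval split above is exact up to sub-microsecond
truncation; with a hypothetical nearest_delta_ns of 2500000000 (2.5 s):

    /* it_value.tv_sec  = 2500000000 / 1000000000          = 2
       it_value.tv_usec = (2500000000 % 1000000000) / 1000 = 500000 */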
static void unix_stop_timer(struct qemu_alarm_timer *t)
#ifdef _WIN32
+static MMRESULT mm_timer;
+static unsigned mm_period;
+
+static void CALLBACK mm_alarm_handler(UINT uTimerID, UINT uMsg,
+ DWORD_PTR dwUser, DWORD_PTR dw1,
+ DWORD_PTR dw2)
+{
+ struct qemu_alarm_timer *t = alarm_timer;
+ if (!t) {
+ return;
+ }
+ if (alarm_has_dynticks(t) || qemu_next_alarm_deadline() <= 0) {
+ t->expired = alarm_has_dynticks(t);
+ t->pending = 1;
+ qemu_notify_event();
+ }
+}
+
+static int mm_start_timer(struct qemu_alarm_timer *t)
+{
+ TIMECAPS tc;
+ UINT flags;
+
+ memset(&tc, 0, sizeof(tc));
+ timeGetDevCaps(&tc, sizeof(tc));
+
+ mm_period = tc.wPeriodMin;
+ timeBeginPeriod(mm_period);
+
+ flags = TIME_CALLBACK_FUNCTION;
+ if (alarm_has_dynticks(t)) {
+ flags |= TIME_ONESHOT;
+ } else {
+ flags |= TIME_PERIODIC;
+ }
+
+ mm_timer = timeSetEvent(1, /* interval (ms) */
+ mm_period, /* resolution */
+ mm_alarm_handler, /* function */
+ (DWORD_PTR)t, /* parameter */
+ flags);
+
+ if (!mm_timer) {
+ fprintf(stderr, "Failed to initialize win32 alarm timer: %ld\n",
+ GetLastError());
+ timeEndPeriod(mm_period);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void mm_stop_timer(struct qemu_alarm_timer *t)
+{
+ timeKillEvent(mm_timer);
+ timeEndPeriod(mm_period);
+}
+
+static void mm_rearm_timer(struct qemu_alarm_timer *t)
+{
+ int nearest_delta_ms;
+
+ assert(alarm_has_dynticks(t));
+ if (!active_timers[QEMU_CLOCK_REALTIME] &&
+ !active_timers[QEMU_CLOCK_VIRTUAL] &&
+ !active_timers[QEMU_CLOCK_HOST]) {
+ return;
+ }
+
+ timeKillEvent(mm_timer);
+
+ nearest_delta_ms = (qemu_next_alarm_deadline() + 999999) / 1000000;
+ if (nearest_delta_ms < 1) {
+ nearest_delta_ms = 1;
+ }
+ mm_timer = timeSetEvent(nearest_delta_ms,
+ mm_period,
+ mm_alarm_handler,
+ (DWORD_PTR)t,
+ TIME_ONESHOT | TIME_CALLBACK_FUNCTION);
+
+ if (!mm_timer) {
+ fprintf(stderr, "Failed to re-arm win32 alarm timer %ld\n",
+ GetLastError());
+
+ timeEndPeriod(mm_period);
+ exit(1);
+ }
+}
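
The (... + 999999) / 1000000 above rounds the nanosecond deadline up to
whole milliseconds, so the multimedia timer never fires before the
deadline; even a 1 ns deadline becomes a 1 ms one-shot, the finest
granularity timeSetEvent() accepts.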
+
static int win32_start_timer(struct qemu_alarm_timer *t)
{
HANDLE hTimer;
return -1;
}
- t->priv = (PVOID) hTimer;
+ t->timer = hTimer;
return 0;
}
static void win32_stop_timer(struct qemu_alarm_timer *t)
{
- HANDLE hTimer = t->priv;
+ HANDLE hTimer = t->timer;
if (hTimer) {
DeleteTimerQueueTimer(NULL, hTimer, NULL);
static void win32_rearm_timer(struct qemu_alarm_timer *t)
{
- HANDLE hTimer = t->priv;
+ HANDLE hTimer = t->timer;
int nearest_delta_ms;
BOOLEAN success;
int qemu_calculate_timeout(void)
{
+#ifndef CONFIG_IOTHREAD
int timeout;
- int64_t add;
- int64_t delta;
- /* When using icount, making forward progress with qemu_icount when the
- guest CPU is idle is critical. We only use the static io-thread timeout
- for non icount runs. */
- if (!use_icount || !vm_running) {
- return 5000;
- }
-
- /* Advance virtual time to the next event. */
- delta = qemu_icount_delta();
- if (delta > 0) {
- /* If virtual time is ahead of real time then just
- wait for IO. */
- timeout = (delta + 999999) / 1000000;
- } else {
- /* Wait for either IO to occur or the next
- timer event. */
- add = qemu_next_deadline();
- /* We advance the timer before checking for IO.
- Limit the amount we advance so that early IO
- activity won't get the guest too far ahead. */
- if (add > 10000000)
- add = 10000000;
- delta += add;
- qemu_icount += qemu_icount_round (add);
- timeout = delta / 1000000;
- if (timeout < 0)
- timeout = 0;
+ if (!vm_running)
+ timeout = 5000;
+ else {
+ /* XXX: use timeout computed from timers */
+ int64_t add;
+ int64_t delta;
+ /* Advance virtual time to the next event. */
+ delta = qemu_icount_delta();
+ if (delta > 0) {
+ /* If virtual time is ahead of real time then just
+ wait for IO. */
+ timeout = (delta + 999999) / 1000000;
+ } else {
+ /* Wait for either IO to occur or the next
+ timer event. */
+ add = qemu_next_icount_deadline();
+ /* We advance the timer before checking for IO.
+ Limit the amount we advance so that early IO
+ activity won't get the guest too far ahead. */
+ if (add > 10000000)
+ add = 10000000;
+ delta += add;
+ qemu_icount += qemu_icount_round (add);
+ timeout = delta / 1000000;
+ if (timeout < 0)
+ timeout = 0;
+ }
}
return timeout;
+#else /* CONFIG_IOTHREAD */
+ return 1000;
+#endif
}
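
A worked example of the non-IOTHREAD icount path: with virtual time 3 ms
behind real time (delta = -3000000) and the next vm_clock deadline 5 ms
away, add = 5000000, qemu_icount advances by qemu_icount_round(5000000),
and the main loop blocks for (-3000000 + 5000000) / 1000000 = 2 ms.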