/*
 * General purpose implementation of a simple periodic countdown timer.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This code is licensed under the GNU LGPL.
 */
#include "hw/hw.h"
#include "qemu/timer.h"
#include "hw/ptimer.h"
#include "qemu/host-utils.h"

struct ptimer_state
{
    uint8_t enabled; /* 0 = disabled, 1 = periodic, 2 = oneshot.  */
    uint64_t limit;
    uint64_t delta;
    uint32_t period_frac;
    int64_t period;
    int64_t last_event;
    int64_t next_event;
    QEMUBH *bh;
    QEMUTimer *timer;
};

/* Use a bottom-half routine to avoid reentrancy issues.  */
static void ptimer_trigger(ptimer_state *s)
{
    if (s->bh) {
        qemu_bh_schedule(s->bh);
    }
}

static void ptimer_reload(ptimer_state *s)
{
    if (s->delta == 0) {
        ptimer_trigger(s);
        s->delta = s->limit;
    }
    if (s->delta == 0 || s->period == 0) {
        fprintf(stderr, "Timer with period zero, disabling\n");
        s->enabled = 0;
        return;
    }

    s->last_event = s->next_event;
    s->next_event = s->last_event + s->delta * s->period;
    if (s->period_frac) {
        s->next_event += ((int64_t)s->period_frac * s->delta) >> 32;
    }
    qemu_mod_timer(s->timer, s->next_event);
}

static void ptimer_tick(void *opaque)
{
    ptimer_state *s = (ptimer_state *)opaque;
    ptimer_trigger(s);
    s->delta = 0;
    if (s->enabled == 2) {
        s->enabled = 0;
    } else {
        ptimer_reload(s);
    }
}

uint64_t ptimer_get_count(ptimer_state *s)
{
    int64_t now;
    uint64_t counter;

    if (s->enabled) {
        now = qemu_get_clock_ns(vm_clock);
        /* Figure out the current counter value.  */
        if (now - s->next_event > 0
            || s->period == 0) {
            /* Prevent timer underflowing if it should already have
               triggered.  */
            counter = 0;
        } else {
            uint64_t rem;
            uint64_t div;
            int clz1, clz2;
            int shift;

            /* We need to divide time by period, where time is stored in
               rem (64-bit integer) and period is stored in period/period_frac
               (64.32 fixed point).

               Doing full precision division is hard, so scale values and
               do a 64-bit division.  The result should be rounded down,
               so that the rounding error never causes the timer to go
               backwards.
            */

            rem = s->next_event - now;
            div = s->period;

            clz1 = clz64(rem);
            clz2 = clz64(div);
            shift = clz1 < clz2 ? clz1 : clz2;

            rem <<= shift;
            div <<= shift;
            if (shift >= 32) {
                div |= ((uint64_t)s->period_frac << (shift - 32));
            } else {
                if (shift != 0) {
                    div |= (s->period_frac >> (32 - shift));
                }
                /* Look at remaining bits of period_frac and round div up if
                   necessary.  */
                if ((uint32_t)(s->period_frac << shift)) {
                    div += 1;
                }
            }
            counter = rem / div;
        }
    } else {
        counter = s->delta;
    }
    return counter;
}
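
/* Worked example of the scaled division in ptimer_get_count above (added
   here for illustration only, not part of the algorithm): with period = 1,
   period_frac = 0x80000000 (a true period of 1.5 ns) and rem = 6 ns,
   clz64(rem) = 61 and clz64(div) = 63, so shift = 61.  After scaling,
   rem = 6 << 61 and div = (1 << 61) | ((uint64_t)0x80000000 << 29)
   = 3 << 60, so counter = rem / div = 4, i.e. 6 / 1.5 rounded down.  */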

void ptimer_set_count(ptimer_state *s, uint64_t count)
{
    s->delta = count;
    if (s->enabled) {
        s->next_event = qemu_get_clock_ns(vm_clock);
        ptimer_reload(s);
    }
}

void ptimer_run(ptimer_state *s, int oneshot)
{
    if (s->enabled) {
        return;
    }
    if (s->period == 0) {
        fprintf(stderr, "Timer with period zero, disabling\n");
        return;
    }
    s->enabled = oneshot ? 2 : 1;
    s->next_event = qemu_get_clock_ns(vm_clock);
    ptimer_reload(s);
}

/* Pause a timer.  Note that this may cause it to "lose" time, even if it
   is immediately restarted.  */
void ptimer_stop(ptimer_state *s)
{
    if (!s->enabled) {
        return;
    }

    s->delta = ptimer_get_count(s);
    qemu_del_timer(s->timer);
    s->enabled = 0;
}

/* Set counter increment interval in nanoseconds.  */
void ptimer_set_period(ptimer_state *s, int64_t period)
{
    s->period = period;
    s->period_frac = 0;
    if (s->enabled) {
        s->next_event = qemu_get_clock_ns(vm_clock);
        ptimer_reload(s);
    }
}

/* Set counter frequency in Hz.  */
void ptimer_set_freq(ptimer_state *s, uint32_t freq)
{
    s->period = 1000000000ll / freq;
    s->period_frac = (1000000000ll << 32) / freq;
    if (s->enabled) {
        s->next_event = qemu_get_clock_ns(vm_clock);
        ptimer_reload(s);
    }
}
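
/* Example of the 64.32 fixed-point split above (illustration only): for
   freq = 3 Hz, period = 1000000000 / 3 = 333333333 ns, and the uint32_t
   assignment keeps only the low 32 bits of (1000000000ll << 32) / 3, so
   period_frac = 0x55555555, i.e. the missing third of a nanosecond
   expressed in 2^-32 ns units.  */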

/* Set the initial countdown value.  If reload is nonzero then also set
   count = limit.  */
void ptimer_set_limit(ptimer_state *s, uint64_t limit, int reload)
{
    /*
     * Artificially limit timeout rate to something
     * achievable under QEMU.  Otherwise, QEMU spends all
     * its time generating timer interrupts, and there
     * is no forward progress.
     * About ten microseconds is the fastest that really works
     * on the current generation of host machines.
     */

    if (limit * s->period < 10000 && s->period) {
        limit = 10000 / s->period;
    }

    s->limit = limit;
    if (reload) {
        s->delta = limit;
    }
    if (s->enabled && reload) {
        s->next_event = qemu_get_clock_ns(vm_clock);
        ptimer_reload(s);
    }
}
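
/* Example of the clamp above (illustration only): a guest programming a
   1 GHz timer (period = 1 ns) with limit = 100 would otherwise trigger
   every 100 ns; the clamp raises limit to 10000 / 1 = 10000, spacing
   triggers roughly ten microseconds apart.  */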

const VMStateDescription vmstate_ptimer = {
    .name = "ptimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(enabled, ptimer_state),
        VMSTATE_UINT64(limit, ptimer_state),
        VMSTATE_UINT64(delta, ptimer_state),
        VMSTATE_UINT32(period_frac, ptimer_state),
        VMSTATE_INT64(period, ptimer_state),
        VMSTATE_INT64(last_event, ptimer_state),
        VMSTATE_INT64(next_event, ptimer_state),
        VMSTATE_TIMER(timer, ptimer_state),
        VMSTATE_END_OF_LIST()
    }
};

ptimer_state *ptimer_init(QEMUBH *bh)
{
    ptimer_state *s;

    s = (ptimer_state *)g_malloc0(sizeof(ptimer_state));
    s->bh = bh;
    s->timer = qemu_new_timer_ns(vm_clock, ptimer_tick, s);
    return s;
}
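
/*
 * Typical use from a device model (illustrative sketch only; names such as
 * MyDeviceState and my_timer_hit are hypothetical and not part of this file):
 *
 *     static void my_timer_hit(void *opaque)
 *     {
 *         MyDeviceState *d = opaque;
 *         // raise the device interrupt here
 *     }
 *
 *     // during device init:
 *     d->ptimer = ptimer_init(qemu_bh_new(my_timer_hit, d));
 *     ptimer_set_freq(d->ptimer, 1000000);   // 1 MHz tick
 *     ptimer_set_limit(d->ptimer, 1000, 1);  // reload value of 1000 ticks
 *     ptimer_run(d->ptimer, 0);              // periodic mode
 */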