/*
 * 8253/8254 interval timer emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2006 Intel Corporation
 * Copyright (c) 2007 Keir Fraser, XenSource Inc
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Sheng Yang <[email protected]>
 *   Based on QEMU and Xen.
 */

#include <linux/kvm_host.h>

#include "irq.h"
#include "i8254.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define RW_STATE_LSB 1
#define RW_STATE_MSB 2
#define RW_STATE_WORD0 3
#define RW_STATE_WORD1 4

/* Compute with 96 bit intermediate result: (a*b)/c */
static u64 muldiv64(u64 a, u32 b, u32 c)
{
	union {
		u64 ll;
		struct {
			u32 low, high;
		} l;
	} u, res;
	u64 rl, rh;

	u.ll = a;
	rl = (u64)u.l.low * (u64)b;
	rh = (u64)u.l.high * (u64)b;
	rh += (rl >> 32);
	res.l.high = div64_u64(rh, c);
	res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
	return res.ll;
}

static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	switch (c->mode) {
	default:
	case 0:
	case 4:
		/* XXX: just disable/enable counting */
		break;
	case 1:
	case 2:
	case 3:
	case 5:
		/* Restart counting on rising edge. */
		if (c->gate < val)
			c->count_load_time = ktime_get();
		break;
	}

	c->gate = val;
}

static int pit_get_gate(struct kvm *kvm, int channel)
{
	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	return kvm->arch.vpit->pit_state.channels[channel].gate;
}

static s64 __kpit_elapsed(struct kvm *kvm)
{
	s64 elapsed;
	ktime_t remaining;
	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;

	if (!ps->pit_timer.period)
		return 0;

	/*
	 * The Counter does not stop when it reaches zero. In
	 * Modes 0, 1, 4, and 5 the Counter ``wraps around'' to
	 * the highest count, either FFFF hex for binary counting
	 * or 9999 for BCD counting, and continues counting.
	 * Modes 2 and 3 are periodic; the Counter reloads
	 * itself with the initial count and continues counting
	 * from there.
	 */
	remaining = hrtimer_expires_remaining(&ps->pit_timer.timer);
	elapsed = ps->pit_timer.period - ktime_to_ns(remaining);
	elapsed = mod_64(elapsed, ps->pit_timer.period);

	return elapsed;
}

static s64 kpit_elapsed(struct kvm *kvm, struct kvm_kpit_channel_state *c,
			int channel)
{
	if (channel == 0)
		return __kpit_elapsed(kvm);

	return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
}

static int pit_get_count(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];
	s64 d, t;
	int counter;

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	t = kpit_elapsed(kvm, c, channel);
	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	case 0:
	case 1:
	case 4:
	case 5:
		counter = (c->count - d) & 0xffff;
		break;
	case 3:
		/* XXX: may be incorrect for odd counts */
		counter = c->count - (mod_64((2 * d), c->count));
		break;
	default:
		counter = c->count - mod_64(d, c->count);
		break;
	}
	return counter;
}

static int pit_get_out(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];
	s64 d, t;
	int out;

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	t = kpit_elapsed(kvm, c, channel);
	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	default:
	case 0:
		out = (d >= c->count);
		break;
	case 1:
		out = (d < c->count);
		break;
	case 2:
		out = ((mod_64(d, c->count) == 0) && (d != 0));
		break;
	case 3:
		out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
		break;
	case 4:
	case 5:
		out = (d == c->count);
		break;
	}

	return out;
}

static void pit_latch_count(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	if (!c->count_latched) {
		c->latched_count = pit_get_count(kvm, channel);
		c->count_latched = c->rw_mode;
	}
}

static void pit_latch_status(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	if (!c->status_latched) {
		/* TODO: Return NULL COUNT (bit 6). */
		c->status = ((pit_get_out(kvm, channel) << 7) |
			     (c->rw_mode << 4) |
			     (c->mode << 1) |
			     c->bcd);
		c->status_latched = 1;
	}
}

int pit_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;

	if (pit && kvm_vcpu_is_bsp(vcpu) && pit->pit_state.irq_ack)
		return atomic_read(&pit->pit_state.pit_timer.pending);
	return 0;
}

static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
						 irq_ack_notifier);
	spin_lock(&ps->inject_lock);
	if (atomic_dec_return(&ps->pit_timer.pending) < 0)
		atomic_inc(&ps->pit_timer.pending);
	ps->irq_ack = 1;
	spin_unlock(&ps->inject_lock);
}

void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
	struct hrtimer *timer;

	if (!kvm_vcpu_is_bsp(vcpu) || !pit)
		return;

	timer = &pit->pit_state.pit_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}

static void destroy_pit_timer(struct kvm_timer *pt)
{
	pr_debug("pit: execute del timer!\n");
	hrtimer_cancel(&pt->timer);
}

static bool kpit_is_periodic(struct kvm_timer *ktimer)
{
	struct kvm_kpit_state *ps = container_of(ktimer, struct kvm_kpit_state,
						 pit_timer);
	return ps->is_periodic;
}

static struct kvm_timer_ops kpit_ops = {
	.is_periodic = kpit_is_periodic,
};

static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
{
	struct kvm_timer *pt = &ps->pit_timer;
	s64 interval;

	interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);

	pr_debug("pit: create pit timer, interval is %llu nsec\n", interval);

	/* TODO: the new value should only take effect after a retrigger. */
	hrtimer_cancel(&pt->timer);
	pt->period = interval;
	ps->is_periodic = is_period;

	pt->timer.function = kvm_timer_fn;
	pt->t_ops = &kpit_ops;
	pt->kvm = ps->pit->kvm;
	pt->vcpu = pt->kvm->bsp_vcpu;

	atomic_set(&pt->pending, 0);
	ps->irq_ack = 1;

	hrtimer_start(&pt->timer, ktime_add_ns(ktime_get(), interval),
		      HRTIMER_MODE_ABS);
}

static void pit_load_count(struct kvm *kvm, int channel, u32 val)
{
	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;

	WARN_ON(!mutex_is_locked(&ps->lock));

	pr_debug("pit: load_count val is %d, channel is %d\n", val, channel);

	/*
	 * The largest possible initial count is 0; this is equivalent
	 * to 2^16 for binary counting and 10^4 for BCD counting.
	 */
	if (val == 0)
		val = 0x10000;

	ps->channels[channel].count = val;

	if (channel != 0) {
		ps->channels[channel].count_load_time = ktime_get();
		return;
	}

	/*
	 * Two kinds of timer: modes 0, 1 and 4 get a one-shot timer,
	 * modes 2 and 3 a periodic one; any other mode deletes the timer.
	 */
	switch (ps->channels[0].mode) {
	case 0:
	case 1:
	/* FIXME: enhance mode 4 precision */
	case 4:
		create_pit_timer(ps, val, 0);
		break;
	case 2:
	case 3:
		create_pit_timer(ps, val, 1);
		break;
	default:
		destroy_pit_timer(&ps->pit_timer);
	}
}

void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val)
{
	pit_load_count(kvm, channel, val);
}

static inline struct kvm_pit *dev_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, dev);
}

static inline struct kvm_pit *speaker_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, speaker_dev);
}

static inline int pit_in_range(gpa_t addr)
{
	return ((addr >= KVM_PIT_BASE_ADDRESS) &&
		(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
}

static int pit_ioport_write(struct kvm_io_device *this,
			    gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	int channel, access;
	struct kvm_kpit_channel_state *s;
	u32 val = *(u32 *) data;
	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	val &= 0xff;
	addr &= KVM_PIT_CHANNEL_MASK;

	mutex_lock(&pit_state->lock);

	if (val != 0)
		pr_debug("pit: write addr is 0x%x, len is %d, val is 0x%x\n",
			 (unsigned int)addr, len, val);

	if (addr == 3) {
		channel = val >> 6;
		if (channel == 3) {
			/* Read-Back Command. */
			for (channel = 0; channel < 3; channel++) {
				s = &pit_state->channels[channel];
				if (val & (2 << channel)) {
					if (!(val & 0x20))
						pit_latch_count(kvm, channel);
					if (!(val & 0x10))
						pit_latch_status(kvm, channel);
				}
			}
		} else {
			/* Select Counter <channel>. */
			s = &pit_state->channels[channel];
			access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
			if (access == 0) {
				pit_latch_count(kvm, channel);
			} else {
				s->rw_mode = access;
				s->read_state = access;
				s->write_state = access;
				s->mode = (val >> 1) & 7;
				if (s->mode > 5)
					s->mode -= 4;
				s->bcd = val & 1;
			}
		}
	} else {
		/* Write Count. */
		s = &pit_state->channels[addr];
		switch (s->write_state) {
		default:
		case RW_STATE_LSB:
			pit_load_count(kvm, addr, val);
			break;
		case RW_STATE_MSB:
			pit_load_count(kvm, addr, val << 8);
			break;
		case RW_STATE_WORD0:
			s->write_latch = val;
			s->write_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			pit_load_count(kvm, addr, s->write_latch | (val << 8));
			s->write_state = RW_STATE_WORD0;
			break;
		}
	}

	mutex_unlock(&pit_state->lock);
	return 0;
}

static int pit_ioport_read(struct kvm_io_device *this,
			   gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	int ret, count;
	struct kvm_kpit_channel_state *s;
	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	addr &= KVM_PIT_CHANNEL_MASK;
	s = &pit_state->channels[addr];

	mutex_lock(&pit_state->lock);

	if (s->status_latched) {
		s->status_latched = 0;
		ret = s->status;
	} else if (s->count_latched) {
		switch (s->count_latched) {
		default:
		case RW_STATE_LSB:
			ret = s->latched_count & 0xff;
			s->count_latched = 0;
			break;
		case RW_STATE_MSB:
			ret = s->latched_count >> 8;
			s->count_latched = 0;
			break;
		case RW_STATE_WORD0:
			ret = s->latched_count & 0xff;
			s->count_latched = RW_STATE_MSB;
			break;
		}
	} else {
		switch (s->read_state) {
		default:
		case RW_STATE_LSB:
			count = pit_get_count(kvm, addr);
			ret = count & 0xff;
			break;
		case RW_STATE_MSB:
			count = pit_get_count(kvm, addr);
			ret = (count >> 8) & 0xff;
			break;
		case RW_STATE_WORD0:
			count = pit_get_count(kvm, addr);
			ret = count & 0xff;
			s->read_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			count = pit_get_count(kvm, addr);
			ret = (count >> 8) & 0xff;
			s->read_state = RW_STATE_WORD0;
			break;
		}
	}

	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);

	mutex_unlock(&pit_state->lock);
	return 0;
}

static int speaker_ioport_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = speaker_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	u32 val = *(u32 *) data;
	if (addr != KVM_SPEAKER_BASE_ADDRESS)
		return -EOPNOTSUPP;

	mutex_lock(&pit_state->lock);
	pit_state->speaker_data_on = (val >> 1) & 1;
	pit_set_gate(kvm, 2, val & 1);
	mutex_unlock(&pit_state->lock);
	return 0;
}

static int speaker_ioport_read(struct kvm_io_device *this,
			       gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = speaker_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	unsigned int refresh_clock;
	int ret;
	if (addr != KVM_SPEAKER_BASE_ADDRESS)
		return -EOPNOTSUPP;

	/* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
	refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;

	mutex_lock(&pit_state->lock);
	ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
	       (pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);
	mutex_unlock(&pit_state->lock);
	return 0;
}

void kvm_pit_reset(struct kvm_pit *pit)
{
	int i;
	struct kvm_kpit_channel_state *c;

	mutex_lock(&pit->pit_state.lock);
	for (i = 0; i < 3; i++) {
		c = &pit->pit_state.channels[i];
		c->mode = 0xff;
		c->gate = (i != 2);
		pit_load_count(pit->kvm, i, 0);
	}
	mutex_unlock(&pit->pit_state.lock);

	atomic_set(&pit->pit_state.pit_timer.pending, 0);
	pit->pit_state.irq_ack = 1;
}

static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
{
	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);

	if (!mask) {
		atomic_set(&pit->pit_state.pit_timer.pending, 0);
		pit->pit_state.irq_ack = 1;
	}
}

static const struct kvm_io_device_ops pit_dev_ops = {
	.read  = pit_ioport_read,
	.write = pit_ioport_write,
};

static const struct kvm_io_device_ops speaker_dev_ops = {
	.read  = speaker_ioport_read,
	.write = speaker_ioport_write,
};

/* Caller must have writers lock on slots_lock */
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
{
	struct kvm_pit *pit;
	struct kvm_kpit_state *pit_state;

	pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
	if (!pit)
		return NULL;

	pit->irq_source_id = kvm_request_irq_source_id(kvm);
	if (pit->irq_source_id < 0) {
		kfree(pit);
		return NULL;
	}

	mutex_init(&pit->pit_state.lock);
	mutex_lock(&pit->pit_state.lock);
	spin_lock_init(&pit->pit_state.inject_lock);

	kvm->arch.vpit = pit;
	pit->kvm = kvm;

	pit_state = &pit->pit_state;
	pit_state->pit = pit;
	hrtimer_init(&pit_state->pit_timer.timer,
		     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	pit_state->irq_ack_notifier.gsi = 0;
	pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
	kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
	pit_state->pit_timer.reinject = true;
	mutex_unlock(&pit->pit_state.lock);

	kvm_pit_reset(pit);

	pit->mask_notifier.func = pit_mask_notifer;
	kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);

	kvm_iodevice_init(&pit->dev, &pit_dev_ops);
	__kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);

	if (flags & KVM_PIT_SPEAKER_DUMMY) {
		kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
		__kvm_io_bus_register_dev(&kvm->pio_bus, &pit->speaker_dev);
	}

	return pit;
}

void kvm_free_pit(struct kvm *kvm)
{
	struct hrtimer *timer;

	if (kvm->arch.vpit) {
		kvm_unregister_irq_mask_notifier(kvm, 0,
						 &kvm->arch.vpit->mask_notifier);
		mutex_lock(&kvm->arch.vpit->pit_state.lock);
		timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
		hrtimer_cancel(timer);
		kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
		mutex_unlock(&kvm->arch.vpit->pit_state.lock);
		kfree(kvm->arch.vpit);
	}
}

static void __inject_pit_timer_intr(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	/* Raise then lower GSI 0 to deliver one edge-triggered pulse. */
	mutex_lock(&kvm->irq_lock);
	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
	mutex_unlock(&kvm->irq_lock);

	/*
	 * Provides NMI watchdog support via Virtual Wire mode.
	 * The route is: PIT -> PIC -> LVT0 in NMI mode.
	 *
	 * Note: Our Virtual Wire implementation is simplified, only
	 * propagating PIT interrupts to all VCPUs when they have set
	 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
	 * VCPU0, and only if its LVT0 is in EXTINT mode.
	 */
	if (kvm->arch.vapics_in_nmi_mode > 0)
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvm_apic_nmi_wd_deliver(vcpu);
}

void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_kpit_state *ps;

	if (vcpu && pit) {
		int inject = 0;
		ps = &pit->pit_state;

		/* Try to inject pending interrupts when
		 * last one has been acked.
		 */
		spin_lock(&ps->inject_lock);
		if (atomic_read(&ps->pit_timer.pending) && ps->irq_ack) {
			ps->irq_ack = 0;
			inject = 1;
		}
		spin_unlock(&ps->inject_lock);
		if (inject)
			__inject_pit_timer_intr(kvm);
	}
}