/*
 * 8253/8254 interval timer emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2006 Intel Corporation
 * Copyright (c) 2007 Keir Fraser, XenSource Inc
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Sheng Yang <[email protected]>
 *   Based on QEMU and Xen.
 */

#define pr_fmt(fmt) "pit: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "i8254.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define RW_STATE_LSB 1
#define RW_STATE_MSB 2
#define RW_STATE_WORD0 3
#define RW_STATE_WORD1 4

/* Compute with 96 bit intermediate result: (a*b)/c */
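/*
 * Split a into 32-bit halves so a * b never overflows 64 bits:
 * a * b = (a_hi * b) << 32 + a_lo * b.  The high product is divided
 * first and its remainder is carried into the low-order division, so
 * the quotient is exact whenever the final result fits in 64 bits.
 * For example, create_pit_timer() below converts PIT ticks to
 * nanoseconds with muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ).
 */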
static u64 muldiv64(u64 a, u32 b, u32 c)
{
	union {
		u64 ll;
		struct {
			u32 low, high;
		} l;
	} u, res;
	u64 rl, rh;

	u.ll = a;
	rl = (u64)u.l.low * (u64)b;
	rh = (u64)u.l.high * (u64)b;
	rh += (rl >> 32);
	res.l.high = div64_u64(rh, c);
	res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
	return res.ll;
}

static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	switch (c->mode) {
	default:
	case 0:
	case 4:
		/* XXX: just disable/enable counting */
		break;
	case 1:
	case 2:
	case 3:
	case 5:
		/* Restart counting on rising edge. */
		if (c->gate < val)
			c->count_load_time = ktime_get();
		break;
	}

	c->gate = val;
}

static int pit_get_gate(struct kvm *kvm, int channel)
{
	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	return kvm->arch.vpit->pit_state.channels[channel].gate;
}

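/*
 * Nanoseconds of channel 0 counting elapsed since the last reload.
 * Channel 0 is backed by a host hrtimer, so the elapsed time is
 * derived from how much of the current timer period remains, then
 * reduced modulo the period so it always falls within one PIT cycle.
 */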
static s64 __kpit_elapsed(struct kvm *kvm)
{
	s64 elapsed;
	ktime_t remaining;
	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;

	if (!ps->pit_timer.period)
		return 0;

	/*
	 * The Counter does not stop when it reaches zero. In
	 * Modes 0, 1, 4, and 5 the Counter ``wraps around'' to
	 * the highest count, either FFFF hex for binary counting
	 * or 9999 for BCD counting, and continues counting.
	 * Modes 2 and 3 are periodic; the Counter reloads
	 * itself with the initial count and continues counting
	 * from there.
	 */
	remaining = hrtimer_get_remaining(&ps->pit_timer.timer);
	elapsed = ps->pit_timer.period - ktime_to_ns(remaining);
	elapsed = mod_64(elapsed, ps->pit_timer.period);

	return elapsed;
}

static s64 kpit_elapsed(struct kvm *kvm, struct kvm_kpit_channel_state *c,
			int channel)
{
	if (channel == 0)
		return __kpit_elapsed(kvm);

	return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
}

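/*
 * Reconstruct the guest-visible counter value: convert the elapsed
 * time into PIT input-clock ticks (KVM_PIT_FREQ, nominally 1.193182
 * MHz) and subtract from the programmed count.  In mode 3 the counter
 * decrements by two per input tick, hence the 2 * d term.
 */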
static int pit_get_count(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];
	s64 d, t;
	int counter;

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	t = kpit_elapsed(kvm, c, channel);
	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	case 0:
	case 1:
	case 4:
	case 5:
		counter = (c->count - d) & 0xffff;
		break;
	case 3:
		/* XXX: may be incorrect for odd counts */
		counter = c->count - (mod_64((2 * d), c->count));
		break;
	default:
		counter = c->count - mod_64(d, c->count);
		break;
	}
	return counter;
}

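/*
 * Reconstruct the level of the channel's OUT pin from the elapsed
 * tick count and the programmed mode (e.g. mode 2 pulses once per
 * period, mode 3 approximates a square wave).  Bit 5 of the speaker
 * port and bit 7 of a latched status byte are derived from this.
 */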
static int pit_get_out(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];
	s64 d, t;
	int out;

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	t = kpit_elapsed(kvm, c, channel);
	d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

	switch (c->mode) {
	default:
	case 0:
		out = (d >= c->count);
		break;
	case 1:
		out = (d < c->count);
		break;
	case 2:
		out = ((mod_64(d, c->count) == 0) && (d != 0));
		break;
	case 3:
		out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
		break;
	case 4:
	case 5:
		out = (d == c->count);
		break;
	}

	return out;
}

static void pit_latch_count(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	if (!c->count_latched) {
		c->latched_count = pit_get_count(kvm, channel);
		c->count_latched = c->rw_mode;
	}
}

static void pit_latch_status(struct kvm *kvm, int channel)
{
	struct kvm_kpit_channel_state *c =
		&kvm->arch.vpit->pit_state.channels[channel];

	WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

	if (!c->status_latched) {
		/* TODO: Return NULL COUNT (bit 6). */
		c->status = ((pit_get_out(kvm, channel) << 7) |
			     (c->rw_mode << 4) |
			     (c->mode << 1) |
			     c->bcd);
		c->status_latched = 1;
	}
}

int pit_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;

	if (pit && kvm_vcpu_is_bsp(vcpu) && pit->pit_state.irq_ack)
		return atomic_read(&pit->pit_state.pit_timer.pending);
	return 0;
}

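/*
 * Called when the guest acknowledges IRQ0 at the interrupt controller.
 * Consume one pending tick (never letting the counter go negative) and
 * mark the line acked so the next pending tick may be injected; see
 * kvm_inject_pit_timer_irqs() below.
 */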
static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
						 irq_ack_notifier);
	spin_lock(&ps->inject_lock);
	if (atomic_dec_return(&ps->pit_timer.pending) < 0)
		atomic_inc(&ps->pit_timer.pending);
	ps->irq_ack = 1;
	spin_unlock(&ps->inject_lock);
}

void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
	struct hrtimer *timer;

	if (!kvm_vcpu_is_bsp(vcpu) || !pit)
		return;

	timer = &pit->pit_state.pit_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}

static void destroy_pit_timer(struct kvm_timer *pt)
{
	pr_debug("execute del timer!\n");
	hrtimer_cancel(&pt->timer);
}

static bool kpit_is_periodic(struct kvm_timer *ktimer)
{
	struct kvm_kpit_state *ps = container_of(ktimer, struct kvm_kpit_state,
						 pit_timer);
	return ps->is_periodic;
}

static struct kvm_timer_ops kpit_ops = {
	.is_periodic = kpit_is_periodic,
};

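/*
 * Arm the host hrtimer that models channel 0.  The guest programs the
 * counter in PIT ticks; the host timer needs nanoseconds, so
 *
 *	interval = val * NSEC_PER_SEC / KVM_PIT_FREQ
 *
 * With the nominal 1.193182 MHz input clock, the maximum count of
 * 0x10000 works out to roughly 54.9 ms, the classic 18.2 Hz tick.
 */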
static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
{
	struct kvm_timer *pt = &ps->pit_timer;
	s64 interval;

	interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);

	pr_debug("create pit timer, interval is %llu nsec\n", interval);

	/* TODO: The new value only takes effect after the timer is retriggered. */
	hrtimer_cancel(&pt->timer);
	pt->period = interval;
	ps->is_periodic = is_period;

	pt->timer.function = kvm_timer_fn;
	pt->t_ops = &kpit_ops;
	pt->kvm = ps->pit->kvm;
	pt->vcpu = pt->kvm->bsp_vcpu;

	atomic_set(&pt->pending, 0);
	ps->irq_ack = 1;

	hrtimer_start(&pt->timer, ktime_add_ns(ktime_get(), interval),
		      HRTIMER_MODE_ABS);
}

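/*
 * Latch a newly written initial count into a channel.  Only channel 0
 * is backed by a host timer (it drives IRQ0); channels 1 and 2 merely
 * record when the count was loaded so reads can be reconstructed from
 * elapsed wall-clock time.
 */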
static void pit_load_count(struct kvm *kvm, int channel, u32 val)
{
	struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;

	WARN_ON(!mutex_is_locked(&ps->lock));

	pr_debug("load_count val is %d, channel is %d\n", val, channel);

	/*
	 * The largest possible initial count is 0; this is equivalent
	 * to 2^16 for binary counting and 10^4 for BCD counting.
	 */
	if (val == 0)
		val = 0x10000;

	ps->channels[channel].count = val;

	if (channel != 0) {
		ps->channels[channel].count_load_time = ktime_get();
		return;
	}

	/*
	 * Two kinds of timer: modes 0, 1 and 4 arm a one-shot timer,
	 * modes 2 and 3 arm a periodic one, anything else deletes the
	 * timer.
	 */
	switch (ps->channels[0].mode) {
	case 0:
	case 1:
	/* FIXME: enhance mode 4 precision */
	case 4:
		if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY))
			create_pit_timer(ps, val, 0);
		break;
	case 2:
	case 3:
		if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY))
			create_pit_timer(ps, val, 1);
		break;
	default:
		destroy_pit_timer(&ps->pit_timer);
	}
}

void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start)
{
	u8 saved_mode;

	if (hpet_legacy_start) {
		/* Save existing mode for later reenablement. */
		saved_mode = kvm->arch.vpit->pit_state.channels[0].mode;
		kvm->arch.vpit->pit_state.channels[0].mode = 0xff; /* disable timer */
		pit_load_count(kvm, channel, val);
		kvm->arch.vpit->pit_state.channels[0].mode = saved_mode;
	} else {
		pit_load_count(kvm, channel, val);
	}
}

static inline struct kvm_pit *dev_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, dev);
}

static inline struct kvm_pit *speaker_to_pit(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_pit, speaker_dev);
}

static inline int pit_in_range(gpa_t addr)
{
	return ((addr >= KVM_PIT_BASE_ADDRESS) &&
		(addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
}

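/*
 * Handle writes to the PIT I/O ports.  Ports 0x40-0x42 take count
 * bytes for channels 0-2; port 0x43 takes a control word, decoded
 * below as: bits 7-6 select the channel (3 = read-back command),
 * bits 5-4 the access mode (0 latches the count), bits 3-1 the
 * operating mode, and bit 0 binary/BCD counting.
 */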
static int pit_ioport_write(struct kvm_io_device *this,
			    gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	int channel, access;
	struct kvm_kpit_channel_state *s;
	u32 val = *(u32 *) data;

	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	val &= 0xff;
	addr &= KVM_PIT_CHANNEL_MASK;

	mutex_lock(&pit_state->lock);

	if (val != 0)
		pr_debug("write addr is 0x%x, len is %d, val is 0x%x\n",
			 (unsigned int)addr, len, val);

	if (addr == 3) {
		channel = val >> 6;
		if (channel == 3) {
			/* Read-Back Command. */
			for (channel = 0; channel < 3; channel++) {
				s = &pit_state->channels[channel];
				if (val & (2 << channel)) {
					if (!(val & 0x20))
						pit_latch_count(kvm, channel);
					if (!(val & 0x10))
						pit_latch_status(kvm, channel);
				}
			}
		} else {
			/* Select Counter <channel>. */
			s = &pit_state->channels[channel];
			access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
			if (access == 0) {
				pit_latch_count(kvm, channel);
			} else {
				s->rw_mode = access;
				s->read_state = access;
				s->write_state = access;
				s->mode = (val >> 1) & 7;
				if (s->mode > 5)
					s->mode -= 4;
				s->bcd = val & 1;
			}
		}
	} else {
		/* Write Count. */
		s = &pit_state->channels[addr];
		switch (s->write_state) {
		default:
		case RW_STATE_LSB:
			pit_load_count(kvm, addr, val);
			break;
		case RW_STATE_MSB:
			pit_load_count(kvm, addr, val << 8);
			break;
		case RW_STATE_WORD0:
			s->write_latch = val;
			s->write_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			pit_load_count(kvm, addr, s->write_latch | (val << 8));
			s->write_state = RW_STATE_WORD0;
			break;
		}
	}

	mutex_unlock(&pit_state->lock);
	return 0;
}

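/*
 * Handle reads from ports 0x40-0x42.  A latched status byte takes
 * precedence, then a latched count, otherwise the live count is
 * computed on the fly; 16-bit accesses are sequenced as LSB followed
 * by MSB via the read_state machine.
 */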
static int pit_ioport_read(struct kvm_io_device *this,
			   gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = dev_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	int ret, count;
	struct kvm_kpit_channel_state *s;

	if (!pit_in_range(addr))
		return -EOPNOTSUPP;

	addr &= KVM_PIT_CHANNEL_MASK;
	s = &pit_state->channels[addr];

	mutex_lock(&pit_state->lock);

	if (s->status_latched) {
		s->status_latched = 0;
		ret = s->status;
	} else if (s->count_latched) {
		switch (s->count_latched) {
		default:
		case RW_STATE_LSB:
			ret = s->latched_count & 0xff;
			s->count_latched = 0;
			break;
		case RW_STATE_MSB:
			ret = s->latched_count >> 8;
			s->count_latched = 0;
			break;
		case RW_STATE_WORD0:
			ret = s->latched_count & 0xff;
			s->count_latched = RW_STATE_MSB;
			break;
		}
	} else {
		switch (s->read_state) {
		default:
		case RW_STATE_LSB:
			count = pit_get_count(kvm, addr);
			ret = count & 0xff;
			break;
		case RW_STATE_MSB:
			count = pit_get_count(kvm, addr);
			ret = (count >> 8) & 0xff;
			break;
		case RW_STATE_WORD0:
			count = pit_get_count(kvm, addr);
			ret = count & 0xff;
			s->read_state = RW_STATE_WORD1;
			break;
		case RW_STATE_WORD1:
			count = pit_get_count(kvm, addr);
			ret = (count >> 8) & 0xff;
			s->read_state = RW_STATE_WORD0;
			break;
		}
	}

	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);

	mutex_unlock(&pit_state->lock);
	return 0;
}

static int speaker_ioport_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *data)
{
	struct kvm_pit *pit = speaker_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	u32 val = *(u32 *) data;

	if (addr != KVM_SPEAKER_BASE_ADDRESS)
		return -EOPNOTSUPP;

	mutex_lock(&pit_state->lock);
	pit_state->speaker_data_on = (val >> 1) & 1;
	pit_set_gate(kvm, 2, val & 1);
	mutex_unlock(&pit_state->lock);
	return 0;
}

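/*
 * Emulate a read of the speaker port (0x61): bit 0 is the channel 2
 * gate, bit 1 the speaker data enable, bit 4 a free-running refresh
 * toggle, and bit 5 the current channel 2 output level.
 */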
static int speaker_ioport_read(struct kvm_io_device *this,
			       gpa_t addr, int len, void *data)
{
	struct kvm_pit *pit = speaker_to_pit(this);
	struct kvm_kpit_state *pit_state = &pit->pit_state;
	struct kvm *kvm = pit->kvm;
	unsigned int refresh_clock;
	int ret;

	if (addr != KVM_SPEAKER_BASE_ADDRESS)
		return -EOPNOTSUPP;

	/* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
	refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;

	mutex_lock(&pit_state->lock);
	ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
	       (pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
	if (len > sizeof(ret))
		len = sizeof(ret);
	memcpy(data, (char *)&ret, len);
	mutex_unlock(&pit_state->lock);
	return 0;
}

void kvm_pit_reset(struct kvm_pit *pit)
{
	int i;
	struct kvm_kpit_channel_state *c;

	mutex_lock(&pit->pit_state.lock);
	pit->pit_state.flags = 0;
	for (i = 0; i < 3; i++) {
		c = &pit->pit_state.channels[i];
		c->mode = 0xff;
		c->gate = (i != 2);
		pit_load_count(pit->kvm, i, 0);
	}
	mutex_unlock(&pit->pit_state.lock);

	atomic_set(&pit->pit_state.pit_timer.pending, 0);
	pit->pit_state.irq_ack = 1;
}

static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
{
	struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);

	if (!mask) {
		atomic_set(&pit->pit_state.pit_timer.pending, 0);
		pit->pit_state.irq_ack = 1;
	}
}

static const struct kvm_io_device_ops pit_dev_ops = {
	.read = pit_ioport_read,
	.write = pit_ioport_write,
};

static const struct kvm_io_device_ops speaker_dev_ops = {
	.read = speaker_ioport_read,
	.write = speaker_ioport_write,
};

/* Caller must have writers lock on slots_lock */
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
{
	struct kvm_pit *pit;
	struct kvm_kpit_state *pit_state;
	int ret;

	pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
	if (!pit)
		return NULL;

	pit->irq_source_id = kvm_request_irq_source_id(kvm);
	if (pit->irq_source_id < 0) {
		kfree(pit);
		return NULL;
	}

	mutex_init(&pit->pit_state.lock);
	mutex_lock(&pit->pit_state.lock);
	spin_lock_init(&pit->pit_state.inject_lock);

	kvm->arch.vpit = pit;
	pit->kvm = kvm;

	pit_state = &pit->pit_state;
	pit_state->pit = pit;
	hrtimer_init(&pit_state->pit_timer.timer,
		     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	pit_state->irq_ack_notifier.gsi = 0;
	pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
	kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
	pit_state->pit_timer.reinject = true;
	mutex_unlock(&pit->pit_state.lock);

	kvm_pit_reset(pit);

	pit->mask_notifier.func = pit_mask_notifer;
	kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);

	kvm_iodevice_init(&pit->dev, &pit_dev_ops);
	ret = __kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
	if (ret < 0)
		goto fail;

	if (flags & KVM_PIT_SPEAKER_DUMMY) {
		kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
		ret = __kvm_io_bus_register_dev(&kvm->pio_bus,
						&pit->speaker_dev);
		if (ret < 0)
			goto fail_unregister;
	}

	return pit;

fail_unregister:
	__kvm_io_bus_unregister_dev(&kvm->pio_bus, &pit->dev);

fail:
	if (pit->irq_source_id >= 0)
		kvm_free_irq_source_id(kvm, pit->irq_source_id);

	kfree(pit);
	return NULL;
}

void kvm_free_pit(struct kvm *kvm)
{
	struct hrtimer *timer;

	if (kvm->arch.vpit) {
		kvm_unregister_irq_mask_notifier(kvm, 0,
						 &kvm->arch.vpit->mask_notifier);
		kvm_unregister_irq_ack_notifier(kvm,
				&kvm->arch.vpit->pit_state.irq_ack_notifier);
		mutex_lock(&kvm->arch.vpit->pit_state.lock);
		timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
		hrtimer_cancel(timer);
		kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
		mutex_unlock(&kvm->arch.vpit->pit_state.lock);
		kfree(kvm->arch.vpit);
	}
}

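/*
 * Pulse IRQ0: raising and immediately lowering the line hands one
 * edge per PIT tick to the interrupt controllers.  If any local APIC
 * has LVT0 programmed for NMI, the tick is also offered to every
 * VCPU's NMI watchdog (simplified virtual wire, see the comment
 * below).
 */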
static void __inject_pit_timer_intr(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
	kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);

	/*
	 * Provides NMI watchdog support via Virtual Wire mode.
	 * The route is: PIT -> PIC -> LVT0 in NMI mode.
	 *
	 * Note: Our Virtual Wire implementation is simplified, only
	 * propagating PIT interrupts to all VCPUs when they have set
	 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
	 * VCPU0, and only if its LVT0 is in EXTINT mode.
	 */
	if (kvm->arch.vapics_in_nmi_mode > 0)
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvm_apic_nmi_wd_deliver(vcpu);
}

void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_pit *pit = vcpu->kvm->arch.vpit;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_kpit_state *ps;

	if (pit) {
		int inject = 0;
		ps = &pit->pit_state;

		/*
		 * Try to inject pending interrupts only once the last
		 * one has been acked.
		 */
		spin_lock(&ps->inject_lock);
		if (atomic_read(&ps->pit_timer.pending) && ps->irq_ack) {
			ps->irq_ack = 0;
			inject = 1;
		}
		spin_unlock(&ps->inject_lock);
		if (inject)
			__inject_pit_timer_intr(kvm);
	}
}