1 |
2 | /*
3 |  * Local APIC virtualization
4 |  *
5 |  * Copyright (C) 2006 Qumranet, Inc.
6 |  * Copyright (C) 2007 Novell
7 |  * Copyright (C) 2007 Intel
9611c187 | 8 |  * Copyright 2009 Red Hat, Inc. and/or its affiliates.
9 |  *
10 |  * Authors:
11 |  *   Dor Laor <[email protected]>
12 |  *   Gregory Haskins <[email protected]>
13 |  *   Yaozu (Eddie) Dong <[email protected]>
14 |  *
15 |  * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
16 |  *
17 |  * This work is licensed under the terms of the GNU GPL, version 2.  See
18 |  * the COPYING file in the top-level directory.
19 |  */
20 |
edf88417 | 21 | #include <linux/kvm_host.h> |
97222cc8 ED |
22 | #include <linux/kvm.h> |
23 | #include <linux/mm.h> | |
24 | #include <linux/highmem.h> | |
25 | #include <linux/smp.h> | |
26 | #include <linux/hrtimer.h> | |
27 | #include <linux/io.h> | |
28 | #include <linux/module.h> | |
6f6d6a1a | 29 | #include <linux/math64.h> |
5a0e3ad6 | 30 | #include <linux/slab.h> |
97222cc8 ED |
31 | #include <asm/processor.h> |
32 | #include <asm/msr.h> | |
33 | #include <asm/page.h> | |
34 | #include <asm/current.h> | |
35 | #include <asm/apicdef.h> | |
60063497 | 36 | #include <linux/atomic.h> |
c5cc421b | 37 | #include <linux/jump_label.h> |
5fdbf976 | 38 | #include "kvm_cache_regs.h" |
97222cc8 | 39 | #include "irq.h" |
229456fc | 40 | #include "trace.h" |
fc61b800 | 41 | #include "x86.h" |
00b27a3e | 42 | #include "cpuid.h" |
97222cc8 | 43 | |
b682b814 MT |
44 | #ifndef CONFIG_X86_64 |
45 | #define mod_64(x, y) ((x) - (y) * div64_u64(x, y)) | |
46 | #else | |
47 | #define mod_64(x, y) ((x) % (y)) | |
48 | #endif | |
49 | ||
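/*
 * Illustrative note (added for clarity, not part of the upstream file): on
 * 32-bit builds a plain 64-bit "%" would pull in a libgcc helper the kernel
 * does not provide, so mod_64() is built from div64_u64().  E.g.
 * mod_64(10000000000ULL, 3000000000ULL) is 10000000000 - 3 * 3000000000
 * == 1000000000, the same value "%" yields on CONFIG_X86_64.
 */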
97222cc8 ED |
50 | #define PRId64 "d" |
51 | #define PRIx64 "llx" | |
52 | #define PRIu64 "u" | |
53 | #define PRIo64 "o" | |
54 | ||
55 | #define APIC_BUS_CYCLE_NS 1 | |
56 | ||
57 | /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */ | |
58 | #define apic_debug(fmt, arg...) | |
59 | ||
60 | #define APIC_LVT_NUM 6 | |
61 | /* 14 is the version for Xeon and Pentium 8.4.8*/ | |
62 | #define APIC_VERSION (0x14UL | ((APIC_LVT_NUM - 1) << 16)) | |
63 | #define LAPIC_MMIO_LENGTH (1 << 12) | |
64 | /* the following defines are not in apicdef.h */
65 | #define APIC_SHORT_MASK 0xc0000 | |
66 | #define APIC_DEST_NOSHORT 0x0 | |
67 | #define APIC_DEST_MASK 0x800 | |
68 | #define MAX_APIC_VECTOR 256 | |
ecba9a52 | 69 | #define APIC_VECTORS_PER_REG 32 |
97222cc8 ED |
70 | |
71 | #define VEC_POS(v) ((v) & (32 - 1)) | |
72 | #define REG_POS(v) (((v) >> 5) << 4) | |
ad312c7c | 73 | |
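/*
 * Illustrative note (added for clarity, not part of the upstream file): the
 * 256 vectors of IRR/ISR/TMR are kept as eight 32-bit registers, each on a
 * 16-byte boundary.  VEC_POS() selects the bit inside a register and
 * REG_POS() the byte offset of the register holding the vector, e.g.:
 *
 *	vec = 0x61;
 *	VEC_POS(vec) == (0x61 & 31)        == 1
 *	REG_POS(vec) == ((0x61 >> 5) << 4) == 0x30
 *
 * so vector 0x61 is bit 1 of the word 0x30 bytes past the bitmap base
 * (e.g. apic->regs + APIC_IRR).
 */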
97222cc8 ED |
74 | static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val) |
75 | { | |
76 | *((u32 *) (apic->regs + reg_off)) = val; | |
77 | } | |
78 | ||
a0c9a822 MT |
79 | static inline int apic_test_vector(int vec, void *bitmap) |
80 | { | |
81 | return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); | |
82 | } | |
83 | ||
10606919 YZ |
84 | bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector) |
85 | { | |
86 | struct kvm_lapic *apic = vcpu->arch.apic; | |
87 | ||
88 | return apic_test_vector(vector, apic->regs + APIC_ISR) || | |
89 | apic_test_vector(vector, apic->regs + APIC_IRR); | |
90 | } | |
91 | ||
97222cc8 ED |
92 | static inline void apic_set_vector(int vec, void *bitmap) |
93 | { | |
94 | set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); | |
95 | } | |
96 | ||
97 | static inline void apic_clear_vector(int vec, void *bitmap) | |
98 | { | |
99 | clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); | |
100 | } | |
101 | ||
8680b94b MT |
102 | static inline int __apic_test_and_set_vector(int vec, void *bitmap) |
103 | { | |
104 | return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); | |
105 | } | |
106 | ||
107 | static inline int __apic_test_and_clear_vector(int vec, void *bitmap) | |
108 | { | |
109 | return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); | |
110 | } | |
111 | ||
c5cc421b | 112 | struct static_key_deferred apic_hw_disabled __read_mostly; |
f8c1ea10 GN |
113 | struct static_key_deferred apic_sw_disabled __read_mostly; |
114 | ||
97222cc8 ED |
115 | static inline int apic_enabled(struct kvm_lapic *apic) |
116 | { | |
c48f1496 | 117 | return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic); |
54e9818f GN |
118 | } |
119 | ||
97222cc8 ED |
120 | #define LVT_MASK \ |
121 | (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK) | |
122 | ||
123 | #define LINT_MASK \ | |
124 | (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \ | |
125 | APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER) | |
126 | ||
127 | static inline int kvm_apic_id(struct kvm_lapic *apic) | |
128 | { | |
c48f1496 | 129 | return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff; |
97222cc8 ED |
130 | } |
131 | ||
17d68b76 GN |
132 | #define KVM_X2APIC_CID_BITS 0 |
133 | ||
1e08ec4a GN |
134 | static void recalculate_apic_map(struct kvm *kvm) |
135 | { | |
136 | struct kvm_apic_map *new, *old = NULL; | |
137 | struct kvm_vcpu *vcpu; | |
138 | int i; | |
139 | ||
140 | new = kzalloc(sizeof(struct kvm_apic_map), GFP_KERNEL); | |
141 | ||
142 | mutex_lock(&kvm->arch.apic_map_lock); | |
143 | ||
144 | if (!new) | |
145 | goto out; | |
146 | ||
147 | new->ldr_bits = 8; | |
148 | /* flat mode is default */ | |
149 | new->cid_shift = 8; | |
150 | new->cid_mask = 0; | |
151 | new->lid_mask = 0xff; | |
152 | ||
153 | kvm_for_each_vcpu(i, vcpu, kvm) { | |
154 | struct kvm_lapic *apic = vcpu->arch.apic; | |
155 | u16 cid, lid; | |
156 | u32 ldr; | |
157 | ||
158 | if (!kvm_apic_present(vcpu)) | |
159 | continue; | |
160 | ||
161 | /* | |
162 | * All APICs have to be configured in the same mode by an OS. | |
163 | * We take advantage of this while building the logical id lookup
164 | * table. After reset, APICs are in xAPIC/flat mode, so if we
165 | * find an APIC with a different setting we assume this is the mode
166 | * the OS wants all APICs to be in; build the lookup table accordingly.
167 | */ | |
168 | if (apic_x2apic_mode(apic)) { | |
169 | new->ldr_bits = 32; | |
170 | new->cid_shift = 16; | |
17d68b76 GN |
171 | new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1; |
172 | new->lid_mask = 0xffff; | |
1e08ec4a GN |
173 | } else if (kvm_apic_sw_enabled(apic) && |
174 | !new->cid_mask /* flat mode */ && | |
175 | kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) { | |
176 | new->cid_shift = 4; | |
177 | new->cid_mask = 0xf; | |
178 | new->lid_mask = 0xf; | |
179 | } | |
180 | ||
181 | new->phys_map[kvm_apic_id(apic)] = apic; | |
182 | ||
183 | ldr = kvm_apic_get_reg(apic, APIC_LDR); | |
184 | cid = apic_cluster_id(new, ldr); | |
185 | lid = apic_logical_id(new, ldr); | |
186 | ||
187 | if (lid) | |
188 | new->logical_map[cid][ffs(lid) - 1] = apic; | |
189 | } | |
190 | out: | |
191 | old = rcu_dereference_protected(kvm->arch.apic_map, | |
192 | lockdep_is_held(&kvm->arch.apic_map_lock)); | |
193 | rcu_assign_pointer(kvm->arch.apic_map, new); | |
194 | mutex_unlock(&kvm->arch.apic_map_lock); | |
195 | ||
196 | if (old) | |
197 | kfree_rcu(old, rcu); | |
c7c9c56c | 198 | |
3d81bc7e | 199 | kvm_vcpu_request_scan_ioapic(kvm); |
1e08ec4a GN |
200 | } |
201 | ||
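/*
 * Illustrative note (added for clarity, not part of the upstream file): the
 * map built above is what kvm_irq_delivery_to_apic_fast() consumes.
 * phys_map[] is indexed directly by the 8-bit APIC ID; logical_map[][] is
 * indexed by cluster id and by the bit position of the logical id.  Assuming
 * the apic_cluster_id()/apic_logical_id() helpers from lapic.h, an xAPIC
 * cluster-mode LDR of 0x52000000 (cid_shift = 4) decodes to cluster 5,
 * logical id bit 1.
 */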
1e1b6c26 NA |
202 | static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val) |
203 | { | |
204 | u32 prev = kvm_apic_get_reg(apic, APIC_SPIV); | |
205 | ||
206 | apic_set_reg(apic, APIC_SPIV, val); | |
207 | if ((prev ^ val) & APIC_SPIV_APIC_ENABLED) { | |
208 | if (val & APIC_SPIV_APIC_ENABLED) { | |
209 | static_key_slow_dec_deferred(&apic_sw_disabled); | |
210 | recalculate_apic_map(apic->vcpu->kvm); | |
211 | } else | |
212 | static_key_slow_inc(&apic_sw_disabled.key); | |
213 | } | |
214 | } | |
215 | ||
1e08ec4a GN |
216 | static inline void kvm_apic_set_id(struct kvm_lapic *apic, u8 id) |
217 | { | |
218 | apic_set_reg(apic, APIC_ID, id << 24); | |
219 | recalculate_apic_map(apic->vcpu->kvm); | |
220 | } | |
221 | ||
222 | static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id) | |
223 | { | |
224 | apic_set_reg(apic, APIC_LDR, id); | |
225 | recalculate_apic_map(apic->vcpu->kvm); | |
226 | } | |
227 | ||
97222cc8 ED |
228 | static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type) |
229 | { | |
c48f1496 | 230 | return !(kvm_apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED); |
97222cc8 ED |
231 | } |
232 | ||
233 | static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type) | |
234 | { | |
c48f1496 | 235 | return kvm_apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK; |
97222cc8 ED |
236 | } |
237 | ||
a3e06bbe LJ | 238 | static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
239 | {
c48f1496 | 240 | return ((kvm_apic_get_reg(apic, APIC_LVTT) &
a3e06bbe LJ | 241 | apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_ONESHOT);
242 | }
243 |
97222cc8 ED | 244 | static inline int apic_lvtt_period(struct kvm_lapic *apic)
245 | {
c48f1496 | 246 | return ((kvm_apic_get_reg(apic, APIC_LVTT) &
a3e06bbe LJ | 247 | apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_PERIODIC);
248 | }
249 |
250 | static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
251 | {
c48f1496 | 252 | return ((kvm_apic_get_reg(apic, APIC_LVTT) &
a3e06bbe LJ | 253 | apic->lapic_timer.timer_mode_mask) ==
254 | APIC_LVT_TIMER_TSCDEADLINE);
97222cc8 ED | 255 | }
256 | ||
cc6e462c JK |
257 | static inline int apic_lvt_nmi_mode(u32 lvt_val) |
258 | { | |
259 | return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI; | |
260 | } | |
261 | ||
fc61b800 GN |
262 | void kvm_apic_set_version(struct kvm_vcpu *vcpu) |
263 | { | |
264 | struct kvm_lapic *apic = vcpu->arch.apic; | |
265 | struct kvm_cpuid_entry2 *feat; | |
266 | u32 v = APIC_VERSION; | |
267 | ||
c48f1496 | 268 | if (!kvm_vcpu_has_lapic(vcpu)) |
fc61b800 GN |
269 | return; |
270 | ||
271 | feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0); | |
272 | if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31)))) | |
273 | v |= APIC_LVR_DIRECTED_EOI; | |
274 | apic_set_reg(apic, APIC_LVR, v); | |
275 | } | |
276 | ||
f1d24831 | 277 | static const unsigned int apic_lvt_mask[APIC_LVT_NUM] = { |
a3e06bbe | 278 | LVT_MASK , /* part LVTT mask, timer mode mask added at runtime */ |
97222cc8 ED |
279 | LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */ |
280 | LVT_MASK | APIC_MODE_MASK, /* LVTPC */ | |
281 | LINT_MASK, LINT_MASK, /* LVT0-1 */ | |
282 | LVT_MASK /* LVTERR */ | |
283 | }; | |
284 | ||
285 | static int find_highest_vector(void *bitmap) | |
286 | { | |
ecba9a52 TY |
287 | int vec; |
288 | u32 *reg; | |
97222cc8 | 289 | |
ecba9a52 TY |
290 | for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG; |
291 | vec >= 0; vec -= APIC_VECTORS_PER_REG) { | |
292 | reg = bitmap + REG_POS(vec); | |
293 | if (*reg) | |
294 | return fls(*reg) - 1 + vec; | |
295 | } | |
97222cc8 | 296 | |
ecba9a52 | 297 | return -1; |
97222cc8 ED |
298 | } |
299 | ||
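/*
 * Illustrative note (added for clarity, not part of the upstream file):
 * find_highest_vector() scans the eight 32-bit words from the top
 * (vectors 224-255) downwards and stops at the first non-zero word.  If all
 * higher words are clear and only bit 3 of the word covering vectors 32-63
 * is set, it returns fls(0x8) - 1 + 32 == 35.
 */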
8680b94b MT |
300 | static u8 count_vectors(void *bitmap) |
301 | { | |
ecba9a52 TY |
302 | int vec; |
303 | u32 *reg; | |
8680b94b | 304 | u8 count = 0; |
ecba9a52 TY |
305 | |
306 | for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) { | |
307 | reg = bitmap + REG_POS(vec); | |
308 | count += hweight32(*reg); | |
309 | } | |
310 | ||
8680b94b MT |
311 | return count; |
312 | } | |
313 | ||
a20ed54d YZ |
314 | void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir) |
315 | { | |
316 | u32 i, pir_val; | |
317 | struct kvm_lapic *apic = vcpu->arch.apic; | |
318 | ||
319 | for (i = 0; i <= 7; i++) { | |
320 | pir_val = xchg(&pir[i], 0); | |
321 | if (pir_val) | |
322 | *((u32 *)(apic->regs + APIC_IRR + i * 0x10)) |= pir_val; | |
323 | } | |
324 | } | |
325 | EXPORT_SYMBOL_GPL(kvm_apic_update_irr); | |
326 | ||
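/*
 * Illustrative note (added for clarity, not part of the upstream file): the
 * 256-bit posted-interrupt request bitmap is consumed as eight 32-bit words.
 * Each word is atomically swapped to zero with xchg() and OR-ed into the
 * matching IRR register (IRR registers are spaced 0x10 apart), so a bit
 * posted by another CPU cannot be lost even if it races with this sync.
 */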
11f5cc05 | 327 | static inline void apic_set_irr(int vec, struct kvm_lapic *apic) |
97222cc8 | 328 | { |
33e4c686 | 329 | apic->irr_pending = true; |
11f5cc05 | 330 | apic_set_vector(vec, apic->regs + APIC_IRR); |
97222cc8 ED |
331 | } |
332 | ||
33e4c686 | 333 | static inline int apic_search_irr(struct kvm_lapic *apic) |
97222cc8 | 334 | { |
33e4c686 | 335 | return find_highest_vector(apic->regs + APIC_IRR); |
97222cc8 ED |
336 | } |
337 | ||
338 | static inline int apic_find_highest_irr(struct kvm_lapic *apic) | |
339 | { | |
340 | int result; | |
341 | ||
c7c9c56c YZ |
342 | /* |
343 | * Note that irr_pending is just a hint. It will be always | |
344 | * true with virtual interrupt delivery enabled. | |
345 | */ | |
33e4c686 GN |
346 | if (!apic->irr_pending) |
347 | return -1; | |
348 | ||
5a71785d | 349 | kvm_x86_ops->sync_pir_to_irr(apic->vcpu); |
33e4c686 | 350 | result = apic_search_irr(apic); |
97222cc8 ED |
351 | ASSERT(result == -1 || result >= 16); |
352 | ||
353 | return result; | |
354 | } | |
355 | ||
33e4c686 GN |
356 | static inline void apic_clear_irr(int vec, struct kvm_lapic *apic) |
357 | { | |
56cc2406 WL |
358 | struct kvm_vcpu *vcpu; |
359 | ||
360 | vcpu = apic->vcpu; | |
361 | ||
33e4c686 | 362 | apic_clear_vector(vec, apic->regs + APIC_IRR); |
56cc2406 WL |
363 | if (unlikely(kvm_apic_vid_enabled(vcpu->kvm))) |
364 | /* try to update RVI */ | |
365 | kvm_make_request(KVM_REQ_EVENT, vcpu); | |
366 | else { | |
367 | vec = apic_search_irr(apic); | |
368 | apic->irr_pending = (vec != -1); | |
369 | } | |
33e4c686 GN |
370 | } |
371 | ||
8680b94b MT |
372 | static inline void apic_set_isr(int vec, struct kvm_lapic *apic) |
373 | { | |
56cc2406 WL |
374 | struct kvm_vcpu *vcpu; |
375 | ||
376 | if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR)) | |
377 | return; | |
378 | ||
379 | vcpu = apic->vcpu; | |
fc57ac2c | 380 | |
8680b94b | 381 | /* |
56cc2406 WL |
382 | * With APIC virtualization enabled, all caching is disabled |
383 | * because the processor can modify ISR under the hood. Instead | |
384 | * just set SVI. | |
8680b94b | 385 | */ |
56cc2406 WL |
386 | if (unlikely(kvm_apic_vid_enabled(vcpu->kvm))) |
387 | kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec); | |
388 | else { | |
389 | ++apic->isr_count; | |
390 | BUG_ON(apic->isr_count > MAX_APIC_VECTOR); | |
391 | /* | |
392 | * ISR (in service register) bit is set when injecting an interrupt. | |
393 | * The highest vector is injected. Thus the latest bit set matches | |
394 | * the highest bit in ISR. | |
395 | */ | |
396 | apic->highest_isr_cache = vec; | |
397 | } | |
8680b94b MT |
398 | } |
399 | ||
fc57ac2c PB |
400 | static inline int apic_find_highest_isr(struct kvm_lapic *apic) |
401 | { | |
402 | int result; | |
403 | ||
404 | /* | |
405 | * Note that isr_count is always 1, and highest_isr_cache | |
406 | * is always -1, with APIC virtualization enabled. | |
407 | */ | |
408 | if (!apic->isr_count) | |
409 | return -1; | |
410 | if (likely(apic->highest_isr_cache != -1)) | |
411 | return apic->highest_isr_cache; | |
412 | ||
413 | result = find_highest_vector(apic->regs + APIC_ISR); | |
414 | ASSERT(result == -1 || result >= 16); | |
415 | ||
416 | return result; | |
417 | } | |
418 | ||
8680b94b MT |
419 | static inline void apic_clear_isr(int vec, struct kvm_lapic *apic) |
420 | { | |
fc57ac2c PB |
421 | struct kvm_vcpu *vcpu; |
422 | if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR)) | |
423 | return; | |
424 | ||
425 | vcpu = apic->vcpu; | |
426 | ||
427 | /* | |
428 | * We do get here for APIC virtualization enabled if the guest | |
429 | * uses the Hyper-V APIC enlightenment. In this case we may need | |
430 | * to trigger a new interrupt delivery by writing the SVI field; | |
431 | * on the other hand isr_count and highest_isr_cache are unused | |
432 | * and must be left alone. | |
433 | */ | |
434 | if (unlikely(kvm_apic_vid_enabled(vcpu->kvm))) | |
435 | kvm_x86_ops->hwapic_isr_update(vcpu->kvm, | |
436 | apic_find_highest_isr(apic)); | |
437 | else { | |
8680b94b | 438 | --apic->isr_count; |
fc57ac2c PB |
439 | BUG_ON(apic->isr_count < 0); |
440 | apic->highest_isr_cache = -1; | |
441 | } | |
8680b94b MT |
442 | } |
443 | ||
6e5d865c YS |
444 | int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu) |
445 | { | |
6e5d865c YS |
446 | int highest_irr; |
447 | ||
33e4c686 GN |
448 | /* This may race with setting of irr in __apic_accept_irq() and |
449 | * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq | |
450 | * will cause vmexit immediately and the value will be recalculated | |
451 | * on the next vmentry. | |
452 | */ | |
c48f1496 | 453 | if (!kvm_vcpu_has_lapic(vcpu)) |
6e5d865c | 454 | return 0; |
54e9818f | 455 | highest_irr = apic_find_highest_irr(vcpu->arch.apic); |
6e5d865c YS |
456 | |
457 | return highest_irr; | |
458 | } | |
6e5d865c | 459 | |
6da7e3f6 | 460 | static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, |
b4f2225c YZ |
461 | int vector, int level, int trig_mode, |
462 | unsigned long *dest_map); | |
6da7e3f6 | 463 | |
b4f2225c YZ |
464 | int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq, |
465 | unsigned long *dest_map) | |
97222cc8 | 466 | { |
ad312c7c | 467 | struct kvm_lapic *apic = vcpu->arch.apic; |
8be5453f | 468 | |
58c2dde1 | 469 | return __apic_accept_irq(apic, irq->delivery_mode, irq->vector, |
b4f2225c | 470 | irq->level, irq->trig_mode, dest_map); |
97222cc8 ED |
471 | } |
472 | ||
ae7a2a3f MT |
473 | static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val) |
474 | { | |
475 | ||
476 | return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val, | |
477 | sizeof(val)); | |
478 | } | |
479 | ||
480 | static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val) | |
481 | { | |
482 | ||
483 | return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val, | |
484 | sizeof(*val)); | |
485 | } | |
486 | ||
487 | static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu) | |
488 | { | |
489 | return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; | |
490 | } | |
491 | ||
492 | static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu) | |
493 | { | |
494 | u8 val; | |
495 | if (pv_eoi_get_user(vcpu, &val) < 0) | |
496 | apic_debug("Can't read EOI MSR value: 0x%llx\n", | |
96893977 | 497 | (unsigned long long)vcpu->arch.pv_eoi.msr_val); |
ae7a2a3f MT |
498 | return val & 0x1; |
499 | } | |
500 | ||
501 | static void pv_eoi_set_pending(struct kvm_vcpu *vcpu) | |
502 | { | |
503 | if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) { | |
504 | apic_debug("Can't set EOI MSR value: 0x%llx\n", | |
96893977 | 505 | (unsigned long long)vcpu->arch.pv_eoi.msr_val); |
ae7a2a3f MT |
506 | return; |
507 | } | |
508 | __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); | |
509 | } | |
510 | ||
511 | static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu) | |
512 | { | |
513 | if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) { | |
514 | apic_debug("Can't clear EOI MSR value: 0x%llx\n", | |
96893977 | 515 | (unsigned long long)vcpu->arch.pv_eoi.msr_val); |
ae7a2a3f MT |
516 | return; |
517 | } | |
518 | __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); | |
519 | } | |
520 | ||
cf9e65b7 YZ |
521 | void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr) |
522 | { | |
523 | struct kvm_lapic *apic = vcpu->arch.apic; | |
524 | int i; | |
525 | ||
526 | for (i = 0; i < 8; i++) | |
527 | apic_set_reg(apic, APIC_TMR + 0x10 * i, tmr[i]); | |
528 | } | |
529 | ||
97222cc8 ED |
530 | static void apic_update_ppr(struct kvm_lapic *apic) |
531 | { | |
3842d135 | 532 | u32 tpr, isrv, ppr, old_ppr; |
97222cc8 ED |
533 | int isr; |
534 | ||
c48f1496 GN |
535 | old_ppr = kvm_apic_get_reg(apic, APIC_PROCPRI); |
536 | tpr = kvm_apic_get_reg(apic, APIC_TASKPRI); | |
97222cc8 ED |
537 | isr = apic_find_highest_isr(apic); |
538 | isrv = (isr != -1) ? isr : 0; | |
539 | ||
540 | if ((tpr & 0xf0) >= (isrv & 0xf0)) | |
541 | ppr = tpr & 0xff; | |
542 | else | |
543 | ppr = isrv & 0xf0; | |
544 | ||
545 | apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x", | |
546 | apic, ppr, isr, isrv); | |
547 | ||
3842d135 AK |
548 | if (old_ppr != ppr) { |
549 | apic_set_reg(apic, APIC_PROCPRI, ppr); | |
83bcacb1 AK |
550 | if (ppr < old_ppr) |
551 | kvm_make_request(KVM_REQ_EVENT, apic->vcpu); | |
3842d135 | 552 | } |
97222cc8 ED |
553 | } |
554 | ||
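/*
 * Illustrative note (added for clarity, not part of the upstream file): the
 * PPR computed above keeps the higher of the TPR and the priority class of
 * the highest in-service vector.  E.g. TPR = 0x35 with in-service vector
 * 0x61 gives PPR = 0x60 (class 6 wins); TPR = 0x75 with the same vector
 * gives PPR = 0x75 (the full TPR is kept).
 */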
555 | static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr) | |
556 | { | |
557 | apic_set_reg(apic, APIC_TASKPRI, tpr); | |
558 | apic_update_ppr(apic); | |
559 | } | |
560 | ||
561 | int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest) | |
562 | { | |
343f94fe | 563 | return dest == 0xff || kvm_apic_id(apic) == dest; |
97222cc8 ED |
564 | } |
565 | ||
566 | int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda) | |
567 | { | |
568 | int result = 0; | |
0105d1a5 GN |
569 | u32 logical_id; |
570 | ||
571 | if (apic_x2apic_mode(apic)) { | |
c48f1496 | 572 | logical_id = kvm_apic_get_reg(apic, APIC_LDR); |
0105d1a5 GN |
573 | return logical_id & mda; |
574 | } | |
97222cc8 | 575 | |
c48f1496 | 576 | logical_id = GET_APIC_LOGICAL_ID(kvm_apic_get_reg(apic, APIC_LDR)); |
97222cc8 | 577 | |
c48f1496 | 578 | switch (kvm_apic_get_reg(apic, APIC_DFR)) { |
97222cc8 ED |
579 | case APIC_DFR_FLAT: |
580 | if (logical_id & mda) | |
581 | result = 1; | |
582 | break; | |
583 | case APIC_DFR_CLUSTER: | |
584 | if (((logical_id >> 4) == (mda >> 0x4)) | |
585 | && (logical_id & mda & 0xf)) | |
586 | result = 1; | |
587 | break; | |
588 | default: | |
7712de87 | 589 | apic_debug("Bad DFR vcpu %d: %08x\n", |
c48f1496 | 590 | apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR)); |
97222cc8 ED |
591 | break; |
592 | } | |
593 | ||
594 | return result; | |
595 | } | |
596 | ||
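/*
 * Illustrative note (added for clarity, not part of the upstream file): in
 * xAPIC flat mode a message matches when logical_id & mda is non-zero, e.g.
 * a logical id of 0x04 matches mda = 0x06.  In cluster mode the upper
 * nibbles must be equal and the lower nibbles must intersect, so logical id
 * 0x21 matches mda = 0x23 but not mda = 0x31.
 */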
343f94fe | 597 | int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source, |
97222cc8 ED |
598 | int short_hand, int dest, int dest_mode) |
599 | { | |
600 | int result = 0; | |
ad312c7c | 601 | struct kvm_lapic *target = vcpu->arch.apic; |
97222cc8 ED |
602 | |
603 | apic_debug("target %p, source %p, dest 0x%x, " | |
343f94fe | 604 | "dest_mode 0x%x, short_hand 0x%x\n", |
97222cc8 ED |
605 | target, source, dest, dest_mode, short_hand); |
606 | ||
bd371396 | 607 | ASSERT(target); |
97222cc8 ED |
608 | switch (short_hand) { |
609 | case APIC_DEST_NOSHORT: | |
343f94fe | 610 | if (dest_mode == 0) |
97222cc8 | 611 | /* Physical mode. */ |
343f94fe GN |
612 | result = kvm_apic_match_physical_addr(target, dest); |
613 | else | |
97222cc8 ED |
614 | /* Logical mode. */ |
615 | result = kvm_apic_match_logical_addr(target, dest); | |
616 | break; | |
617 | case APIC_DEST_SELF: | |
343f94fe | 618 | result = (target == source); |
97222cc8 ED |
619 | break; |
620 | case APIC_DEST_ALLINC: | |
621 | result = 1; | |
622 | break; | |
623 | case APIC_DEST_ALLBUT: | |
343f94fe | 624 | result = (target != source); |
97222cc8 ED |
625 | break; |
626 | default: | |
7712de87 JK |
627 | apic_debug("kvm: apic: Bad dest shorthand value %x\n", |
628 | short_hand); | |
97222cc8 ED |
629 | break; |
630 | } | |
631 | ||
632 | return result; | |
633 | } | |
634 | ||
1e08ec4a | 635 | bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, |
b4f2225c | 636 | struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map) |
1e08ec4a GN |
637 | { |
638 | struct kvm_apic_map *map; | |
639 | unsigned long bitmap = 1; | |
640 | struct kvm_lapic **dst; | |
641 | int i; | |
642 | bool ret = false; | |
643 | ||
644 | *r = -1; | |
645 | ||
646 | if (irq->shorthand == APIC_DEST_SELF) { | |
b4f2225c | 647 | *r = kvm_apic_set_irq(src->vcpu, irq, dest_map); |
1e08ec4a GN |
648 | return true; |
649 | } | |
650 | ||
651 | if (irq->shorthand) | |
652 | return false; | |
653 | ||
654 | rcu_read_lock(); | |
655 | map = rcu_dereference(kvm->arch.apic_map); | |
656 | ||
657 | if (!map) | |
658 | goto out; | |
659 | ||
660 | if (irq->dest_mode == 0) { /* physical mode */ | |
661 | if (irq->delivery_mode == APIC_DM_LOWEST || | |
662 | irq->dest_id == 0xff) | |
663 | goto out; | |
664 | dst = &map->phys_map[irq->dest_id & 0xff]; | |
665 | } else { | |
666 | u32 mda = irq->dest_id << (32 - map->ldr_bits); | |
667 | ||
668 | dst = map->logical_map[apic_cluster_id(map, mda)]; | |
669 | ||
670 | bitmap = apic_logical_id(map, mda); | |
671 | ||
672 | if (irq->delivery_mode == APIC_DM_LOWEST) { | |
673 | int l = -1; | |
674 | for_each_set_bit(i, &bitmap, 16) { | |
675 | if (!dst[i]) | |
676 | continue; | |
677 | if (l < 0) | |
678 | l = i; | |
679 | else if (kvm_apic_compare_prio(dst[i]->vcpu, dst[l]->vcpu) < 0) | |
680 | l = i; | |
681 | } | |
682 | ||
683 | bitmap = (l >= 0) ? 1 << l : 0; | |
684 | } | |
685 | } | |
686 | ||
687 | for_each_set_bit(i, &bitmap, 16) { | |
688 | if (!dst[i]) | |
689 | continue; | |
690 | if (*r < 0) | |
691 | *r = 0; | |
b4f2225c | 692 | *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map); |
1e08ec4a GN |
693 | } |
694 | ||
695 | ret = true; | |
696 | out: | |
697 | rcu_read_unlock(); | |
698 | return ret; | |
699 | } | |
700 | ||
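/*
 * Illustrative note (added for clarity, not part of the upstream file): for
 * lowest-priority delivery the loop above does not look at TPR/PPR; it keeps
 * the matching destination whose vcpu has the smallest apic_arb_prio, a
 * per-vcpu counter that __apic_accept_irq() bumps for every lowest-priority
 * interrupt, which approximates round-robin arbitration.
 */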
97222cc8 ED |
701 | /* |
702 | * Add a pending IRQ into lapic. | |
703 | * Return 1 if successfully added and 0 if discarded. | |
704 | */ | |
705 | static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, | |
b4f2225c YZ |
706 | int vector, int level, int trig_mode, |
707 | unsigned long *dest_map) | |
97222cc8 | 708 | { |
6da7e3f6 | 709 | int result = 0; |
c5ec1534 | 710 | struct kvm_vcpu *vcpu = apic->vcpu; |
97222cc8 ED |
711 | |
712 | switch (delivery_mode) { | |
97222cc8 | 713 | case APIC_DM_LOWEST: |
e1035715 GN |
714 | vcpu->arch.apic_arb_prio++; |
715 | case APIC_DM_FIXED: | |
97222cc8 ED |
716 | /* FIXME add logic for vcpu on reset */ |
717 | if (unlikely(!apic_enabled(apic))) | |
718 | break; | |
719 | ||
11f5cc05 JK |
720 | result = 1; |
721 | ||
b4f2225c YZ |
722 | if (dest_map) |
723 | __set_bit(vcpu->vcpu_id, dest_map); | |
a5d36f82 | 724 | |
11f5cc05 | 725 | if (kvm_x86_ops->deliver_posted_interrupt) |
5a71785d | 726 | kvm_x86_ops->deliver_posted_interrupt(vcpu, vector); |
11f5cc05 JK |
727 | else { |
728 | apic_set_irr(vector, apic); | |
5a71785d YZ |
729 | |
730 | kvm_make_request(KVM_REQ_EVENT, vcpu); | |
731 | kvm_vcpu_kick(vcpu); | |
732 | } | |
5a71785d | 733 | trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode, |
11f5cc05 | 734 | trig_mode, vector, false); |
97222cc8 ED |
735 | break; |
736 | ||
737 | case APIC_DM_REMRD: | |
24d2166b R |
738 | result = 1; |
739 | vcpu->arch.pv.pv_unhalted = 1; | |
740 | kvm_make_request(KVM_REQ_EVENT, vcpu); | |
741 | kvm_vcpu_kick(vcpu); | |
97222cc8 ED |
742 | break; |
743 | ||
744 | case APIC_DM_SMI: | |
7712de87 | 745 | apic_debug("Ignoring guest SMI\n"); |
97222cc8 | 746 | break; |
3419ffc8 | 747 | |
97222cc8 | 748 | case APIC_DM_NMI: |
6da7e3f6 | 749 | result = 1; |
3419ffc8 | 750 | kvm_inject_nmi(vcpu); |
26df99c6 | 751 | kvm_vcpu_kick(vcpu); |
97222cc8 ED |
752 | break; |
753 | ||
754 | case APIC_DM_INIT: | |
a52315e1 | 755 | if (!trig_mode || level) { |
6da7e3f6 | 756 | result = 1; |
66450a21 JK |
757 | /* assumes that there are only KVM_APIC_INIT/SIPI */ |
758 | apic->pending_events = (1UL << KVM_APIC_INIT); | |
759 | /* make sure pending_events is visible before sending | |
760 | * the request */ | |
761 | smp_wmb(); | |
3842d135 | 762 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
c5ec1534 HQ |
763 | kvm_vcpu_kick(vcpu); |
764 | } else { | |
1b10bf31 JK |
765 | apic_debug("Ignoring de-assert INIT to vcpu %d\n", |
766 | vcpu->vcpu_id); | |
c5ec1534 | 767 | } |
97222cc8 ED |
768 | break; |
769 | ||
770 | case APIC_DM_STARTUP: | |
1b10bf31 JK |
771 | apic_debug("SIPI to vcpu %d vector 0x%02x\n", |
772 | vcpu->vcpu_id, vector); | |
66450a21 JK |
773 | result = 1; |
774 | apic->sipi_vector = vector; | |
775 | /* make sure sipi_vector is visible for the receiver */ | |
776 | smp_wmb(); | |
777 | set_bit(KVM_APIC_SIPI, &apic->pending_events); | |
778 | kvm_make_request(KVM_REQ_EVENT, vcpu); | |
779 | kvm_vcpu_kick(vcpu); | |
97222cc8 ED |
780 | break; |
781 | ||
23930f95 JK |
782 | case APIC_DM_EXTINT: |
783 | /* | |
784 | * Should only be called by kvm_apic_local_deliver() with LVT0, | |
785 | * before NMI watchdog was enabled. Already handled by | |
786 | * kvm_apic_accept_pic_intr(). | |
787 | */ | |
788 | break; | |
789 | ||
97222cc8 ED |
790 | default: |
791 | printk(KERN_ERR "TODO: unsupported delivery mode %x\n", | |
792 | delivery_mode); | |
793 | break; | |
794 | } | |
795 | return result; | |
796 | } | |
797 | ||
e1035715 | 798 | int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2) |
8be5453f | 799 | { |
e1035715 | 800 | return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio; |
8be5453f ZX |
801 | } |
802 | ||
c7c9c56c YZ |
803 | static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector) |
804 | { | |
805 | if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) && | |
806 | kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) { | |
807 | int trigger_mode; | |
808 | if (apic_test_vector(vector, apic->regs + APIC_TMR)) | |
809 | trigger_mode = IOAPIC_LEVEL_TRIG; | |
810 | else | |
811 | trigger_mode = IOAPIC_EDGE_TRIG; | |
1fcc7890 | 812 | kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode); |
c7c9c56c YZ |
813 | } |
814 | } | |
815 | ||
ae7a2a3f | 816 | static int apic_set_eoi(struct kvm_lapic *apic) |
97222cc8 ED |
817 | { |
818 | int vector = apic_find_highest_isr(apic); | |
ae7a2a3f MT |
819 | |
820 | trace_kvm_eoi(apic, vector); | |
821 | ||
97222cc8 ED |
822 | /* |
823 | * Not every EOI write has a corresponding ISR bit set;
824 | * one example is when the kernel checks the timer in setup_IO_APIC.
825 | */ | |
826 | if (vector == -1) | |
ae7a2a3f | 827 | return vector; |
97222cc8 | 828 | |
8680b94b | 829 | apic_clear_isr(vector, apic); |
97222cc8 ED |
830 | apic_update_ppr(apic); |
831 | ||
c7c9c56c | 832 | kvm_ioapic_send_eoi(apic, vector); |
3842d135 | 833 | kvm_make_request(KVM_REQ_EVENT, apic->vcpu); |
ae7a2a3f | 834 | return vector; |
97222cc8 ED |
835 | } |
836 | ||
c7c9c56c YZ |
837 | /* |
838 | * this interface assumes a trap-like exit, which has already finished | |
839 | * desired side effect including vISR and vPPR update. | |
840 | */ | |
841 | void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector) | |
842 | { | |
843 | struct kvm_lapic *apic = vcpu->arch.apic; | |
844 | ||
845 | trace_kvm_eoi(apic, vector); | |
846 | ||
847 | kvm_ioapic_send_eoi(apic, vector); | |
848 | kvm_make_request(KVM_REQ_EVENT, apic->vcpu); | |
849 | } | |
850 | EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated); | |
851 | ||
97222cc8 ED |
852 | static void apic_send_ipi(struct kvm_lapic *apic) |
853 | { | |
c48f1496 GN |
854 | u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR); |
855 | u32 icr_high = kvm_apic_get_reg(apic, APIC_ICR2); | |
58c2dde1 | 856 | struct kvm_lapic_irq irq; |
97222cc8 | 857 | |
58c2dde1 GN |
858 | irq.vector = icr_low & APIC_VECTOR_MASK; |
859 | irq.delivery_mode = icr_low & APIC_MODE_MASK; | |
860 | irq.dest_mode = icr_low & APIC_DEST_MASK; | |
861 | irq.level = icr_low & APIC_INT_ASSERT; | |
862 | irq.trig_mode = icr_low & APIC_INT_LEVELTRIG; | |
863 | irq.shorthand = icr_low & APIC_SHORT_MASK; | |
0105d1a5 GN |
864 | if (apic_x2apic_mode(apic)) |
865 | irq.dest_id = icr_high; | |
866 | else | |
867 | irq.dest_id = GET_APIC_DEST_FIELD(icr_high); | |
97222cc8 | 868 | |
1000ff8d GN |
869 | trace_kvm_apic_ipi(icr_low, irq.dest_id); |
870 | ||
97222cc8 ED |
871 | apic_debug("icr_high 0x%x, icr_low 0x%x, " |
872 | "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, " | |
873 | "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n", | |
9b5843dd | 874 | icr_high, icr_low, irq.shorthand, irq.dest_id, |
58c2dde1 GN |
875 | irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode, |
876 | irq.vector); | |
877 | ||
b4f2225c | 878 | kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL); |
97222cc8 ED |
879 | } |
880 | ||
881 | static u32 apic_get_tmcct(struct kvm_lapic *apic) | |
882 | { | |
b682b814 MT |
883 | ktime_t remaining; |
884 | s64 ns; | |
9da8f4e8 | 885 | u32 tmcct; |
97222cc8 ED |
886 | |
887 | ASSERT(apic != NULL); | |
888 | ||
9da8f4e8 | 889 | /* if initial count is 0, current count should also be 0 */ |
b963a22e AH |
890 | if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 || |
891 | apic->lapic_timer.period == 0) | |
9da8f4e8 KP |
892 | return 0; |
893 | ||
ace15464 | 894 | remaining = hrtimer_get_remaining(&apic->lapic_timer.timer); |
b682b814 MT |
895 | if (ktime_to_ns(remaining) < 0) |
896 | remaining = ktime_set(0, 0); | |
897 | ||
d3c7b77d MT |
898 | ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period); |
899 | tmcct = div64_u64(ns, | |
900 | (APIC_BUS_CYCLE_NS * apic->divide_count)); | |
97222cc8 ED |
901 | |
902 | return tmcct; | |
903 | } | |
904 | ||
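/*
 * Illustrative note (added for clarity, not part of the upstream file): the
 * current count is not stored, it is derived from the time left on the
 * hrtimer.  With APIC_BUS_CYCLE_NS == 1, divide_count == 16 and 4000 ns
 * remaining, the guest reads TMCCT = 4000 / (1 * 16) == 250.
 */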
b209749f AK |
905 | static void __report_tpr_access(struct kvm_lapic *apic, bool write) |
906 | { | |
907 | struct kvm_vcpu *vcpu = apic->vcpu; | |
908 | struct kvm_run *run = vcpu->run; | |
909 | ||
a8eeb04a | 910 | kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu); |
5fdbf976 | 911 | run->tpr_access.rip = kvm_rip_read(vcpu); |
b209749f AK |
912 | run->tpr_access.is_write = write; |
913 | } | |
914 | ||
915 | static inline void report_tpr_access(struct kvm_lapic *apic, bool write) | |
916 | { | |
917 | if (apic->vcpu->arch.tpr_access_reporting) | |
918 | __report_tpr_access(apic, write); | |
919 | } | |
920 | ||
97222cc8 ED |
921 | static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset) |
922 | { | |
923 | u32 val = 0; | |
924 | ||
925 | if (offset >= LAPIC_MMIO_LENGTH) | |
926 | return 0; | |
927 | ||
928 | switch (offset) { | |
0105d1a5 GN |
929 | case APIC_ID: |
930 | if (apic_x2apic_mode(apic)) | |
931 | val = kvm_apic_id(apic); | |
932 | else | |
933 | val = kvm_apic_id(apic) << 24; | |
934 | break; | |
97222cc8 | 935 | case APIC_ARBPRI: |
7712de87 | 936 | apic_debug("Access APIC ARBPRI register which is for P6\n"); |
97222cc8 ED |
937 | break; |
938 | ||
939 | case APIC_TMCCT: /* Timer CCR */ | |
a3e06bbe LJ |
940 | if (apic_lvtt_tscdeadline(apic)) |
941 | return 0; | |
942 | ||
97222cc8 ED |
943 | val = apic_get_tmcct(apic); |
944 | break; | |
4a4541a4 AK |
945 | case APIC_PROCPRI: |
946 | apic_update_ppr(apic); | |
c48f1496 | 947 | val = kvm_apic_get_reg(apic, offset); |
4a4541a4 | 948 | break; |
b209749f AK |
949 | case APIC_TASKPRI: |
950 | report_tpr_access(apic, false); | |
951 | /* fall thru */ | |
97222cc8 | 952 | default: |
c48f1496 | 953 | val = kvm_apic_get_reg(apic, offset); |
97222cc8 ED |
954 | break; |
955 | } | |
956 | ||
957 | return val; | |
958 | } | |
959 | ||
d76685c4 GH |
960 | static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev) |
961 | { | |
962 | return container_of(dev, struct kvm_lapic, dev); | |
963 | } | |
964 | ||
0105d1a5 GN |
965 | static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len, |
966 | void *data) | |
97222cc8 | 967 | { |
97222cc8 ED |
968 | unsigned char alignment = offset & 0xf; |
969 | u32 result; | |
d5b0b5b1 | 970 | /* this bitmask has a bit cleared for each reserved register */ |
0105d1a5 | 971 | static const u64 rmask = 0x43ff01ffffffe70cULL; |
97222cc8 ED |
972 | |
973 | if ((alignment + len) > 4) { | |
4088bb3c GN |
974 | apic_debug("KVM_APIC_READ: alignment error %x %d\n", |
975 | offset, len); | |
0105d1a5 | 976 | return 1; |
97222cc8 | 977 | } |
0105d1a5 GN |
978 | |
979 | if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) { | |
4088bb3c GN |
980 | apic_debug("KVM_APIC_READ: read reserved register %x\n", |
981 | offset); | |
0105d1a5 GN |
982 | return 1; |
983 | } | |
984 | ||
97222cc8 ED |
985 | result = __apic_read(apic, offset & ~0xf); |
986 | ||
229456fc MT |
987 | trace_kvm_apic_read(offset, result); |
988 | ||
97222cc8 ED |
989 | switch (len) { |
990 | case 1: | |
991 | case 2: | |
992 | case 4: | |
993 | memcpy(data, (char *)&result + alignment, len); | |
994 | break; | |
995 | default: | |
996 | printk(KERN_ERR "Local APIC read with len = %x, " | |
997 | "should be 1,2, or 4 instead\n", len); | |
998 | break; | |
999 | } | |
bda9020e | 1000 | return 0; |
97222cc8 ED |
1001 | } |
1002 | ||
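/*
 * Illustrative note (added for clarity, not part of the upstream file):
 * rmask above has one bit per 16-byte register slot, tested with
 * bit (offset >> 4).  E.g. APIC_TASKPRI (0x80, slot 8) has its bit set and
 * is readable, while APIC_EOI (0xb0, slot 11) has its bit clear and is
 * treated as a reserved (write-only) register.
 */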
0105d1a5 GN |
1003 | static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr) |
1004 | { | |
c48f1496 | 1005 | return kvm_apic_hw_enabled(apic) && |
0105d1a5 GN |
1006 | addr >= apic->base_address && |
1007 | addr < apic->base_address + LAPIC_MMIO_LENGTH; | |
1008 | } | |
1009 | ||
1010 | static int apic_mmio_read(struct kvm_io_device *this, | |
1011 | gpa_t address, int len, void *data) | |
1012 | { | |
1013 | struct kvm_lapic *apic = to_lapic(this); | |
1014 | u32 offset = address - apic->base_address; | |
1015 | ||
1016 | if (!apic_mmio_in_range(apic, address)) | |
1017 | return -EOPNOTSUPP; | |
1018 | ||
1019 | apic_reg_read(apic, offset, len, data); | |
1020 | ||
1021 | return 0; | |
1022 | } | |
1023 | ||
97222cc8 ED |
1024 | static void update_divide_count(struct kvm_lapic *apic) |
1025 | { | |
1026 | u32 tmp1, tmp2, tdcr; | |
1027 | ||
c48f1496 | 1028 | tdcr = kvm_apic_get_reg(apic, APIC_TDCR); |
97222cc8 ED |
1029 | tmp1 = tdcr & 0xf; |
1030 | tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1; | |
d3c7b77d | 1031 | apic->divide_count = 0x1 << (tmp2 & 0x7); |
97222cc8 ED |
1032 | |
1033 | apic_debug("timer divide count is 0x%x\n", | |
9b5843dd | 1034 | apic->divide_count); |
97222cc8 ED |
1035 | } |
1036 | ||
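/*
 * Illustrative note (added for clarity, not part of the upstream file):
 * APIC_TDCR encodes the divider in bits 0, 1 and 3.  The code folds bit 3
 * down next to bits 0-1 and adds one, so TDCR = 0x3 gives
 * divide_count = 1 << 4 == 16, and TDCR = 0xb ("divide by 1") gives
 * 1 << (8 & 7) == 1.
 */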
1037 | static void start_apic_timer(struct kvm_lapic *apic) | |
1038 | { | |
a3e06bbe | 1039 | ktime_t now; |
d3c7b77d | 1040 | atomic_set(&apic->lapic_timer.pending, 0); |
0b975a3c | 1041 | |
a3e06bbe | 1042 | if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) { |
d5b0b5b1 | 1043 | /* lapic timer in oneshot or periodic mode */ |
a3e06bbe | 1044 | now = apic->lapic_timer.timer.base->get_time(); |
c48f1496 | 1045 | apic->lapic_timer.period = (u64)kvm_apic_get_reg(apic, APIC_TMICT) |
a3e06bbe LJ |
1046 | * APIC_BUS_CYCLE_NS * apic->divide_count; |
1047 | ||
1048 | if (!apic->lapic_timer.period) | |
1049 | return; | |
1050 | /* | |
1051 | * Do not allow the guest to program periodic timers with small | |
1052 | * interval, since the hrtimers are not throttled by the host | |
1053 | * scheduler. | |
1054 | */ | |
1055 | if (apic_lvtt_period(apic)) { | |
1056 | s64 min_period = min_timer_period_us * 1000LL; | |
1057 | ||
1058 | if (apic->lapic_timer.period < min_period) { | |
1059 | pr_info_ratelimited( | |
1060 | "kvm: vcpu %i: requested %lld ns " | |
1061 | "lapic timer period limited to %lld ns\n", | |
1062 | apic->vcpu->vcpu_id, | |
1063 | apic->lapic_timer.period, min_period); | |
1064 | apic->lapic_timer.period = min_period; | |
1065 | } | |
9bc5791d | 1066 | } |
0b975a3c | 1067 | |
a3e06bbe LJ |
1068 | hrtimer_start(&apic->lapic_timer.timer, |
1069 | ktime_add_ns(now, apic->lapic_timer.period), | |
1070 | HRTIMER_MODE_ABS); | |
97222cc8 | 1071 | |
a3e06bbe | 1072 | apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016" |
97222cc8 ED |
1073 | PRIx64 ", " |
1074 | "timer initial count 0x%x, period %lldns, " | |
b8688d51 | 1075 | "expire @ 0x%016" PRIx64 ".\n", __func__, |
97222cc8 | 1076 | APIC_BUS_CYCLE_NS, ktime_to_ns(now), |
c48f1496 | 1077 | kvm_apic_get_reg(apic, APIC_TMICT), |
d3c7b77d | 1078 | apic->lapic_timer.period, |
97222cc8 | 1079 | ktime_to_ns(ktime_add_ns(now, |
d3c7b77d | 1080 | apic->lapic_timer.period))); |
a3e06bbe LJ |
1081 | } else if (apic_lvtt_tscdeadline(apic)) { |
1082 | /* lapic timer in tsc deadline mode */ | |
1083 | u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline; | |
1084 | u64 ns = 0; | |
1085 | struct kvm_vcpu *vcpu = apic->vcpu; | |
cc578287 | 1086 | unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz; |
a3e06bbe LJ |
1087 | unsigned long flags; |
1088 | ||
1089 | if (unlikely(!tscdeadline || !this_tsc_khz)) | |
1090 | return; | |
1091 | ||
1092 | local_irq_save(flags); | |
1093 | ||
1094 | now = apic->lapic_timer.timer.base->get_time(); | |
886b470c | 1095 | guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc()); |
a3e06bbe LJ |
1096 | if (likely(tscdeadline > guest_tsc)) { |
1097 | ns = (tscdeadline - guest_tsc) * 1000000ULL; | |
1098 | do_div(ns, this_tsc_khz); | |
1099 | } | |
1100 | hrtimer_start(&apic->lapic_timer.timer, | |
1101 | ktime_add_ns(now, ns), HRTIMER_MODE_ABS); | |
1102 | ||
1103 | local_irq_restore(flags); | |
1104 | } | |
97222cc8 ED |
1105 | } |
1106 | ||
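/*
 * Illustrative note (added for clarity, not part of the upstream file): in
 * TSC-deadline mode the hrtimer expiry above is
 * ns = (tscdeadline - guest_tsc) * 1000000 / this_tsc_khz.  A deadline
 * 3,000,000 TSC ticks ahead on a 3,000,000 kHz (3 GHz) guest clock
 * therefore fires in 1,000,000 ns, i.e. one millisecond.
 */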
cc6e462c JK |
1107 | static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val) |
1108 | { | |
c48f1496 | 1109 | int nmi_wd_enabled = apic_lvt_nmi_mode(kvm_apic_get_reg(apic, APIC_LVT0)); |
cc6e462c JK |
1110 | |
1111 | if (apic_lvt_nmi_mode(lvt0_val)) { | |
1112 | if (!nmi_wd_enabled) { | |
1113 | apic_debug("Receive NMI setting on APIC_LVT0 " | |
1114 | "for cpu %d\n", apic->vcpu->vcpu_id); | |
1115 | apic->vcpu->kvm->arch.vapics_in_nmi_mode++; | |
1116 | } | |
1117 | } else if (nmi_wd_enabled) | |
1118 | apic->vcpu->kvm->arch.vapics_in_nmi_mode--; | |
1119 | } | |
1120 | ||
0105d1a5 | 1121 | static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val) |
97222cc8 | 1122 | { |
0105d1a5 | 1123 | int ret = 0; |
97222cc8 | 1124 | |
0105d1a5 | 1125 | trace_kvm_apic_write(reg, val); |
97222cc8 | 1126 | |
0105d1a5 | 1127 | switch (reg) { |
97222cc8 | 1128 | case APIC_ID: /* Local APIC ID */ |
0105d1a5 | 1129 | if (!apic_x2apic_mode(apic)) |
1e08ec4a | 1130 | kvm_apic_set_id(apic, val >> 24); |
0105d1a5 GN |
1131 | else |
1132 | ret = 1; | |
97222cc8 ED |
1133 | break; |
1134 | ||
1135 | case APIC_TASKPRI: | |
b209749f | 1136 | report_tpr_access(apic, true); |
97222cc8 ED |
1137 | apic_set_tpr(apic, val & 0xff); |
1138 | break; | |
1139 | ||
1140 | case APIC_EOI: | |
1141 | apic_set_eoi(apic); | |
1142 | break; | |
1143 | ||
1144 | case APIC_LDR: | |
0105d1a5 | 1145 | if (!apic_x2apic_mode(apic)) |
1e08ec4a | 1146 | kvm_apic_set_ldr(apic, val & APIC_LDR_MASK); |
0105d1a5 GN |
1147 | else |
1148 | ret = 1; | |
97222cc8 ED |
1149 | break; |
1150 | ||
1151 | case APIC_DFR: | |
1e08ec4a | 1152 | if (!apic_x2apic_mode(apic)) { |
0105d1a5 | 1153 | apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF); |
1e08ec4a GN |
1154 | recalculate_apic_map(apic->vcpu->kvm); |
1155 | } else | |
0105d1a5 | 1156 | ret = 1; |
97222cc8 ED |
1157 | break; |
1158 | ||
fc61b800 GN |
1159 | case APIC_SPIV: { |
1160 | u32 mask = 0x3ff; | |
c48f1496 | 1161 | if (kvm_apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI) |
fc61b800 | 1162 | mask |= APIC_SPIV_DIRECTED_EOI; |
f8c1ea10 | 1163 | apic_set_spiv(apic, val & mask); |
97222cc8 ED |
1164 | if (!(val & APIC_SPIV_APIC_ENABLED)) { |
1165 | int i; | |
1166 | u32 lvt_val; | |
1167 | ||
1168 | for (i = 0; i < APIC_LVT_NUM; i++) { | |
c48f1496 | 1169 | lvt_val = kvm_apic_get_reg(apic, |
97222cc8 ED |
1170 | APIC_LVTT + 0x10 * i); |
1171 | apic_set_reg(apic, APIC_LVTT + 0x10 * i, | |
1172 | lvt_val | APIC_LVT_MASKED); | |
1173 | } | |
d3c7b77d | 1174 | atomic_set(&apic->lapic_timer.pending, 0); |
97222cc8 ED |
1175 | |
1176 | } | |
1177 | break; | |
fc61b800 | 1178 | } |
97222cc8 ED |
1179 | case APIC_ICR: |
1180 | /* No delay here, so we always clear the pending bit */ | |
1181 | apic_set_reg(apic, APIC_ICR, val & ~(1 << 12)); | |
1182 | apic_send_ipi(apic); | |
1183 | break; | |
1184 | ||
1185 | case APIC_ICR2: | |
0105d1a5 GN |
1186 | if (!apic_x2apic_mode(apic)) |
1187 | val &= 0xff000000; | |
1188 | apic_set_reg(apic, APIC_ICR2, val); | |
97222cc8 ED |
1189 | break; |
1190 | ||
23930f95 | 1191 | case APIC_LVT0: |
cc6e462c | 1192 | apic_manage_nmi_watchdog(apic, val); |
97222cc8 ED |
1193 | case APIC_LVTTHMR: |
1194 | case APIC_LVTPC: | |
97222cc8 ED |
1195 | case APIC_LVT1: |
1196 | case APIC_LVTERR: | |
1197 | /* TODO: Check vector */ | |
c48f1496 | 1198 | if (!kvm_apic_sw_enabled(apic)) |
97222cc8 ED |
1199 | val |= APIC_LVT_MASKED; |
1200 | ||
0105d1a5 GN |
1201 | val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4]; |
1202 | apic_set_reg(apic, reg, val); | |
97222cc8 ED |
1203 | |
1204 | break; | |
1205 | ||
a3e06bbe | 1206 | case APIC_LVTT: |
c48f1496 | 1207 | if ((kvm_apic_get_reg(apic, APIC_LVTT) & |
a3e06bbe LJ |
1208 | apic->lapic_timer.timer_mode_mask) != |
1209 | (val & apic->lapic_timer.timer_mode_mask)) | |
1210 | hrtimer_cancel(&apic->lapic_timer.timer); | |
1211 | ||
c48f1496 | 1212 | if (!kvm_apic_sw_enabled(apic)) |
a3e06bbe LJ |
1213 | val |= APIC_LVT_MASKED; |
1214 | val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask); | |
1215 | apic_set_reg(apic, APIC_LVTT, val); | |
1216 | break; | |
1217 | ||
97222cc8 | 1218 | case APIC_TMICT: |
a3e06bbe LJ |
1219 | if (apic_lvtt_tscdeadline(apic)) |
1220 | break; | |
1221 | ||
d3c7b77d | 1222 | hrtimer_cancel(&apic->lapic_timer.timer); |
97222cc8 ED |
1223 | apic_set_reg(apic, APIC_TMICT, val); |
1224 | start_apic_timer(apic); | |
0105d1a5 | 1225 | break; |
97222cc8 ED |
1226 | |
1227 | case APIC_TDCR: | |
1228 | if (val & 4) | |
7712de87 | 1229 | apic_debug("KVM_WRITE:TDCR %x\n", val); |
97222cc8 ED |
1230 | apic_set_reg(apic, APIC_TDCR, val); |
1231 | update_divide_count(apic); | |
1232 | break; | |
1233 | ||
0105d1a5 GN |
1234 | case APIC_ESR: |
1235 | if (apic_x2apic_mode(apic) && val != 0) { | |
7712de87 | 1236 | apic_debug("KVM_WRITE:ESR not zero %x\n", val); |
0105d1a5 GN |
1237 | ret = 1; |
1238 | } | |
1239 | break; | |
1240 | ||
1241 | case APIC_SELF_IPI: | |
1242 | if (apic_x2apic_mode(apic)) { | |
1243 | apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff)); | |
1244 | } else | |
1245 | ret = 1; | |
1246 | break; | |
97222cc8 | 1247 | default: |
0105d1a5 | 1248 | ret = 1; |
97222cc8 ED |
1249 | break; |
1250 | } | |
0105d1a5 GN |
1251 | if (ret) |
1252 | apic_debug("Local APIC Write to read-only register %x\n", reg); | |
1253 | return ret; | |
1254 | } | |
1255 | ||
1256 | static int apic_mmio_write(struct kvm_io_device *this, | |
1257 | gpa_t address, int len, const void *data) | |
1258 | { | |
1259 | struct kvm_lapic *apic = to_lapic(this); | |
1260 | unsigned int offset = address - apic->base_address; | |
1261 | u32 val; | |
1262 | ||
1263 | if (!apic_mmio_in_range(apic, address)) | |
1264 | return -EOPNOTSUPP; | |
1265 | ||
1266 | /* | |
1267 | * APIC registers must be aligned on a 128-bit boundary.
1268 | * 32/64/128-bit registers must be accessed through 32-bit accesses.
1269 | * Refer to SDM 8.4.1.
1270 | */ | |
1271 | if (len != 4 || (offset & 0xf)) { | |
1272 | /* Don't shout loud, $infamous_os would cause only noise. */ | |
1273 | apic_debug("apic write: bad size=%d %lx\n", len, (long)address); | |
756975bb | 1274 | return 0; |
0105d1a5 GN |
1275 | } |
1276 | ||
1277 | val = *(u32*)data; | |
1278 | ||
1279 | /* too common printing */ | |
1280 | if (offset != APIC_EOI) | |
1281 | apic_debug("%s: offset 0x%x with length 0x%x, and value is " | |
1282 | "0x%x\n", __func__, offset, len, val); | |
1283 | ||
1284 | apic_reg_write(apic, offset & 0xff0, val); | |
1285 | ||
bda9020e | 1286 | return 0; |
97222cc8 ED |
1287 | } |
1288 | ||
58fbbf26 KT |
1289 | void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu) |
1290 | { | |
c48f1496 | 1291 | if (kvm_vcpu_has_lapic(vcpu)) |
58fbbf26 KT |
1292 | apic_reg_write(vcpu->arch.apic, APIC_EOI, 0); |
1293 | } | |
1294 | EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi); | |
1295 | ||
83d4c286 YZ |
1296 | /* emulate APIC access in a trap manner */ |
1297 | void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset) | |
1298 | { | |
1299 | u32 val = 0; | |
1300 | ||
1301 | /* hw has done the conditional check and inst decode */ | |
1302 | offset &= 0xff0; | |
1303 | ||
1304 | apic_reg_read(vcpu->arch.apic, offset, 4, &val); | |
1305 | ||
1306 | /* TODO: optimize to just emulate side effect w/o one more write */ | |
1307 | apic_reg_write(vcpu->arch.apic, offset, val); | |
1308 | } | |
1309 | EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode); | |
1310 | ||
d589444e | 1311 | void kvm_free_lapic(struct kvm_vcpu *vcpu) |
97222cc8 | 1312 | { |
f8c1ea10 GN |
1313 | struct kvm_lapic *apic = vcpu->arch.apic; |
1314 | ||
ad312c7c | 1315 | if (!vcpu->arch.apic) |
97222cc8 ED |
1316 | return; |
1317 | ||
f8c1ea10 | 1318 | hrtimer_cancel(&apic->lapic_timer.timer); |
97222cc8 | 1319 | |
c5cc421b GN |
1320 | if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)) |
1321 | static_key_slow_dec_deferred(&apic_hw_disabled); | |
1322 | ||
c48f1496 | 1323 | if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED)) |
f8c1ea10 | 1324 | static_key_slow_dec_deferred(&apic_sw_disabled); |
97222cc8 | 1325 | |
f8c1ea10 GN |
1326 | if (apic->regs) |
1327 | free_page((unsigned long)apic->regs); | |
1328 | ||
1329 | kfree(apic); | |
97222cc8 ED |
1330 | } |
1331 | ||
1332 | /* | |
1333 | *---------------------------------------------------------------------- | |
1334 | * LAPIC interface | |
1335 | *---------------------------------------------------------------------- | |
1336 | */ | |
1337 | ||
a3e06bbe LJ |
1338 | u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu) |
1339 | { | |
1340 | struct kvm_lapic *apic = vcpu->arch.apic; | |
a3e06bbe | 1341 | |
c48f1496 | 1342 | if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) || |
54e9818f | 1343 | apic_lvtt_period(apic)) |
a3e06bbe LJ |
1344 | return 0; |
1345 | ||
1346 | return apic->lapic_timer.tscdeadline; | |
1347 | } | |
1348 | ||
1349 | void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data) | |
1350 | { | |
1351 | struct kvm_lapic *apic = vcpu->arch.apic; | |
a3e06bbe | 1352 | |
c48f1496 | 1353 | if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) || |
54e9818f | 1354 | apic_lvtt_period(apic)) |
a3e06bbe LJ |
1355 | return; |
1356 | ||
1357 | hrtimer_cancel(&apic->lapic_timer.timer); | |
fae0ba21 NA |
1358 | /* Inject here so clearing tscdeadline won't override new value */ |
1359 | if (apic_has_pending_timer(vcpu)) | |
1360 | kvm_inject_apic_timer_irqs(vcpu); | |
a3e06bbe LJ |
1361 | apic->lapic_timer.tscdeadline = data; |
1362 | start_apic_timer(apic); | |
1363 | } | |
1364 | ||
97222cc8 ED |
1365 | void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8) |
1366 | { | |
ad312c7c | 1367 | struct kvm_lapic *apic = vcpu->arch.apic; |
97222cc8 | 1368 | |
c48f1496 | 1369 | if (!kvm_vcpu_has_lapic(vcpu)) |
97222cc8 | 1370 | return; |
54e9818f | 1371 | |
b93463aa | 1372 | apic_set_tpr(apic, ((cr8 & 0x0f) << 4) |
c48f1496 | 1373 | | (kvm_apic_get_reg(apic, APIC_TASKPRI) & 4)); |
97222cc8 ED |
1374 | } |
1375 | ||
1376 | u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu) | |
1377 | { | |
97222cc8 ED |
1378 | u64 tpr; |
1379 | ||
c48f1496 | 1380 | if (!kvm_vcpu_has_lapic(vcpu)) |
97222cc8 | 1381 | return 0; |
54e9818f | 1382 | |
c48f1496 | 1383 | tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI); |
97222cc8 ED |
1384 | |
1385 | return (tpr & 0xf0) >> 4; | |
1386 | } | |
1387 | ||
1388 | void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) | |
1389 | { | |
8d14695f | 1390 | u64 old_value = vcpu->arch.apic_base; |
ad312c7c | 1391 | struct kvm_lapic *apic = vcpu->arch.apic; |
97222cc8 ED |
1392 | |
1393 | if (!apic) { | |
1394 | value |= MSR_IA32_APICBASE_BSP; | |
ad312c7c | 1395 | vcpu->arch.apic_base = value; |
97222cc8 ED |
1396 | return; |
1397 | } | |
c5af89b6 | 1398 | |
e66d2ae7 JK |
1399 | if (!kvm_vcpu_is_bsp(apic->vcpu)) |
1400 | value &= ~MSR_IA32_APICBASE_BSP; | |
1401 | vcpu->arch.apic_base = value; | |
1402 | ||
c5cc421b | 1403 | /* update jump label if enable bit changes */ |
0dce7cd6 | 1404 | if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) { |
c5cc421b GN |
1405 | if (value & MSR_IA32_APICBASE_ENABLE) |
1406 | static_key_slow_dec_deferred(&apic_hw_disabled); | |
1407 | else | |
1408 | static_key_slow_inc(&apic_hw_disabled.key); | |
1e08ec4a | 1409 | recalculate_apic_map(vcpu->kvm); |
c5cc421b GN |
1410 | } |
1411 | ||
8d14695f YZ |
1412 | if ((old_value ^ value) & X2APIC_ENABLE) { |
1413 | if (value & X2APIC_ENABLE) { | |
1414 | u32 id = kvm_apic_id(apic); | |
1415 | u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf)); | |
1416 | kvm_apic_set_ldr(apic, ldr); | |
1417 | kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true); | |
1418 | } else | |
1419 | kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false); | |
0105d1a5 | 1420 | } |
8d14695f | 1421 | |
ad312c7c | 1422 | apic->base_address = apic->vcpu->arch.apic_base & |
97222cc8 ED |
1423 | MSR_IA32_APICBASE_BASE; |
1424 | ||
1425 | /* with FSB delivery interrupt, we can restart APIC functionality */ | |
1426 | apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is " | |
ad312c7c | 1427 | "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address); |
97222cc8 ED |
1428 | |
1429 | } | |
1430 | ||
c5ec1534 | 1431 | void kvm_lapic_reset(struct kvm_vcpu *vcpu) |
97222cc8 ED |
1432 | { |
1433 | struct kvm_lapic *apic; | |
1434 | int i; | |
1435 | ||
b8688d51 | 1436 | apic_debug("%s\n", __func__); |
97222cc8 ED |
1437 | |
1438 | ASSERT(vcpu); | |
ad312c7c | 1439 | apic = vcpu->arch.apic; |
97222cc8 ED |
1440 | ASSERT(apic != NULL); |
1441 | ||
1442 | /* Stop the timer in case it's a reset to an active apic */ | |
d3c7b77d | 1443 | hrtimer_cancel(&apic->lapic_timer.timer); |
97222cc8 | 1444 | |
1e08ec4a | 1445 | kvm_apic_set_id(apic, vcpu->vcpu_id); |
fc61b800 | 1446 | kvm_apic_set_version(apic->vcpu); |
97222cc8 ED |
1447 | |
1448 | for (i = 0; i < APIC_LVT_NUM; i++) | |
1449 | apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); | |
40487c68 QH |
1450 | apic_set_reg(apic, APIC_LVT0, |
1451 | SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); | |
97222cc8 ED |
1452 | |
1453 | apic_set_reg(apic, APIC_DFR, 0xffffffffU); | |
f8c1ea10 | 1454 | apic_set_spiv(apic, 0xff); |
97222cc8 | 1455 | apic_set_reg(apic, APIC_TASKPRI, 0); |
1e08ec4a | 1456 | kvm_apic_set_ldr(apic, 0); |
97222cc8 ED |
1457 | apic_set_reg(apic, APIC_ESR, 0); |
1458 | apic_set_reg(apic, APIC_ICR, 0); | |
1459 | apic_set_reg(apic, APIC_ICR2, 0); | |
1460 | apic_set_reg(apic, APIC_TDCR, 0); | |
1461 | apic_set_reg(apic, APIC_TMICT, 0); | |
1462 | for (i = 0; i < 8; i++) { | |
1463 | apic_set_reg(apic, APIC_IRR + 0x10 * i, 0); | |
1464 | apic_set_reg(apic, APIC_ISR + 0x10 * i, 0); | |
1465 | apic_set_reg(apic, APIC_TMR + 0x10 * i, 0); | |
1466 | } | |
c7c9c56c YZ |
1467 | apic->irr_pending = kvm_apic_vid_enabled(vcpu->kvm); |
1468 | apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm); | |
8680b94b | 1469 | apic->highest_isr_cache = -1; |
b33ac88b | 1470 | update_divide_count(apic); |
d3c7b77d | 1471 | atomic_set(&apic->lapic_timer.pending, 0); |
c5af89b6 | 1472 | if (kvm_vcpu_is_bsp(vcpu)) |
5dbc8f3f GN |
1473 | kvm_lapic_set_base(vcpu, |
1474 | vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP); | |
ae7a2a3f | 1475 | vcpu->arch.pv_eoi.msr_val = 0; |
97222cc8 ED |
1476 | apic_update_ppr(apic); |
1477 | ||
e1035715 | 1478 | vcpu->arch.apic_arb_prio = 0; |
41383771 | 1479 | vcpu->arch.apic_attention = 0; |
e1035715 | 1480 | |
98eff52a | 1481 | apic_debug("%s: vcpu=%p, id=%d, base_msr=" |
b8688d51 | 1482 | "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__, |
97222cc8 | 1483 | vcpu, kvm_apic_id(apic), |
ad312c7c | 1484 | vcpu->arch.apic_base, apic->base_address); |
97222cc8 ED |
1485 | } |
1486 | ||
97222cc8 ED |
1487 | /* |
1488 | *---------------------------------------------------------------------- | |
1489 | * timer interface | |
1490 | *---------------------------------------------------------------------- | |
1491 | */ | |
1b9778da | 1492 | |
2a6eac96 | 1493 | static bool lapic_is_periodic(struct kvm_lapic *apic) |
97222cc8 | 1494 | { |
d3c7b77d | 1495 | return apic_lvtt_period(apic); |
97222cc8 ED |
1496 | } |
1497 | ||
3d80840d MT |
1498 | int apic_has_pending_timer(struct kvm_vcpu *vcpu) |
1499 | { | |
54e9818f | 1500 | struct kvm_lapic *apic = vcpu->arch.apic; |
3d80840d | 1501 | |
c48f1496 | 1502 | if (kvm_vcpu_has_lapic(vcpu) && apic_enabled(apic) && |
54e9818f GN |
1503 | apic_lvt_enabled(apic, APIC_LVTT)) |
1504 | return atomic_read(&apic->lapic_timer.pending); | |
3d80840d MT |
1505 | |
1506 | return 0; | |
1507 | } | |
1508 | ||
89342082 | 1509 | int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type) |
1b9778da | 1510 | { |
c48f1496 | 1511 | u32 reg = kvm_apic_get_reg(apic, lvt_type); |
23930f95 | 1512 | int vector, mode, trig_mode; |
23930f95 | 1513 | |
c48f1496 | 1514 | if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) { |
23930f95 JK |
1515 | vector = reg & APIC_VECTOR_MASK; |
1516 | mode = reg & APIC_MODE_MASK; | |
1517 | trig_mode = reg & APIC_LVT_LEVEL_TRIGGER; | |
b4f2225c YZ |
1518 | return __apic_accept_irq(apic, mode, vector, 1, trig_mode, |
1519 | NULL); | |
23930f95 JK |
1520 | } |
1521 | return 0; | |
1522 | } | |
1b9778da | 1523 | |
8fdb2351 | 1524 | void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu) |
23930f95 | 1525 | { |
8fdb2351 JK |
1526 | struct kvm_lapic *apic = vcpu->arch.apic; |
1527 | ||
1528 | if (apic) | |
1529 | kvm_apic_local_deliver(apic, APIC_LVT0); | |
1b9778da ED |
1530 | } |
1531 | ||
d76685c4 GH |
1532 | static const struct kvm_io_device_ops apic_mmio_ops = { |
1533 | .read = apic_mmio_read, | |
1534 | .write = apic_mmio_write, | |
d76685c4 GH |
1535 | }; |
1536 | ||
e9d90d47 AK |
1537 | static enum hrtimer_restart apic_timer_fn(struct hrtimer *data) |
1538 | { | |
1539 | struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer); | |
2a6eac96 AK |
1540 | struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer); |
1541 | struct kvm_vcpu *vcpu = apic->vcpu; | |
e9d90d47 AK |
1542 | wait_queue_head_t *q = &vcpu->wq; |
1543 | ||
1544 | /* | |
1545 | * There is a race window between reading and incrementing, but we do | |
1546 | * not care about potentially losing timer events in the !reinject | |
1547 | * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked | |
1548 | * in vcpu_enter_guest. | |
1549 | */ | |
2a6eac96 | 1550 | if (!atomic_read(&ktimer->pending)) { |
e9d90d47 AK |
1551 | atomic_inc(&ktimer->pending); |
1552 | /* FIXME: this code should not know anything about vcpus */ | |
1553 | kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); | |
1554 | } | |
1555 | ||
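| /* wake the vcpu in case it is blocked in kvm_vcpu_block() waiting for this tick */ | |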
1556 | if (waitqueue_active(q)) | |
1557 | wake_up_interruptible(q); | |
1558 | ||
2a6eac96 | 1559 | if (lapic_is_periodic(apic)) { |
e9d90d47 AK |
1560 | hrtimer_add_expires_ns(&ktimer->timer, ktimer->period); |
1561 | return HRTIMER_RESTART; | |
1562 | } else | |
1563 | return HRTIMER_NORESTART; | |
1564 | } | |
1565 | ||
97222cc8 ED |
1566 | int kvm_create_lapic(struct kvm_vcpu *vcpu) |
1567 | { | |
1568 | struct kvm_lapic *apic; | |
1569 | ||
1570 | ASSERT(vcpu != NULL); | |
1571 | apic_debug("apic_init %d\n", vcpu->vcpu_id); | |
1572 | ||
1573 | apic = kzalloc(sizeof(*apic), GFP_KERNEL); | |
1574 | if (!apic) | |
1575 | goto nomem; | |
1576 | ||
ad312c7c | 1577 | vcpu->arch.apic = apic; |
97222cc8 | 1578 | |
afc20184 TY |
1579 | apic->regs = (void *)get_zeroed_page(GFP_KERNEL); |
1580 | if (!apic->regs) { | |
97222cc8 ED |
1581 | printk(KERN_ERR "malloc apic regs error for vcpu %x\n", |
1582 | vcpu->vcpu_id); | |
d589444e | 1583 | goto nomem_free_apic; |
97222cc8 | 1584 | } |
97222cc8 ED |
1585 | apic->vcpu = vcpu; |
1586 | ||
d3c7b77d MT |
1587 | hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, |
1588 | HRTIMER_MODE_ABS); | |
e9d90d47 | 1589 | apic->lapic_timer.timer.function = apic_timer_fn; |
d3c7b77d | 1590 | |
c5cc421b GN |
1591 | /* |
1592 | * APIC is created enabled. This will prevent kvm_lapic_set_base from | |
1593 | * thinking that APIC state has changed. | |
1594 | */ | |
1595 | vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE; | |
6aed64a8 GN |
1596 | kvm_lapic_set_base(vcpu, |
1597 | APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE); | |
97222cc8 | 1598 | |
f8c1ea10 | 1599 | static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */ |
c5ec1534 | 1600 | kvm_lapic_reset(vcpu); |
d76685c4 | 1601 | kvm_iodevice_init(&apic->dev, &apic_mmio_ops); |
97222cc8 ED |
1602 | |
1603 | return 0; | |
d589444e RR |
1604 | nomem_free_apic: |
1605 | kfree(apic); | |
97222cc8 | 1606 | nomem: |
97222cc8 ED |
1607 | return -ENOMEM; |
1608 | } | |
97222cc8 ED |
1609 | |
1610 | int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu) | |
1611 | { | |
ad312c7c | 1612 | struct kvm_lapic *apic = vcpu->arch.apic; |
97222cc8 ED |
1613 | int highest_irr; |
1614 | ||
c48f1496 | 1615 | if (!kvm_vcpu_has_lapic(vcpu) || !apic_enabled(apic)) |
97222cc8 ED |
1616 | return -1; |
1617 | ||
6e5d865c | 1618 | apic_update_ppr(apic); |
97222cc8 ED |
1619 | highest_irr = apic_find_highest_irr(apic); |
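| /* deliverable only if the vector's priority class (bits 7:4) is above the current processor priority */ | |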
1620 | if ((highest_irr == -1) || | |
c48f1496 | 1621 | ((highest_irr & 0xF0) <= kvm_apic_get_reg(apic, APIC_PROCPRI))) |
97222cc8 ED |
1622 | return -1; |
1623 | return highest_irr; | |
1624 | } | |
1625 | ||
40487c68 QH |
1626 | int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu) |
1627 | { | |
c48f1496 | 1628 | u32 lvt0 = kvm_apic_get_reg(vcpu->arch.apic, APIC_LVT0); |
40487c68 QH |
1629 | int r = 0; |
1630 | ||
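| /* the PIC may inject only if the LAPIC is hardware-disabled, or LVT0 is unmasked and programmed for ExtINT */ | |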
c48f1496 | 1631 | if (!kvm_apic_hw_enabled(vcpu->arch.apic)) |
e7dca5c0 CL |
1632 | r = 1; |
1633 | if ((lvt0 & APIC_LVT_MASKED) == 0 && | |
1634 | GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT) | |
1635 | r = 1; | |
40487c68 QH |
1636 | return r; |
1637 | } | |
1638 | ||
1b9778da ED |
1639 | void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu) |
1640 | { | |
ad312c7c | 1641 | struct kvm_lapic *apic = vcpu->arch.apic; |
1b9778da | 1642 | |
c48f1496 | 1643 | if (!kvm_vcpu_has_lapic(vcpu)) |
54e9818f GN |
1644 | return; |
1645 | ||
1646 | if (atomic_read(&apic->lapic_timer.pending) > 0) { | |
f1ed0450 | 1647 | kvm_apic_local_deliver(apic, APIC_LVTT); |
fae0ba21 NA |
1648 | if (apic_lvtt_tscdeadline(apic)) |
1649 | apic->lapic_timer.tscdeadline = 0; | |
f1ed0450 | 1650 | atomic_set(&apic->lapic_timer.pending, 0); |
1b9778da ED |
1651 | } |
1652 | } | |
1653 | ||
97222cc8 ED |
1654 | int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu) |
1655 | { | |
1656 | int vector = kvm_apic_has_interrupt(vcpu); | |
ad312c7c | 1657 | struct kvm_lapic *apic = vcpu->arch.apic; |
97222cc8 ED |
1658 | |
1659 | if (vector == -1) | |
1660 | return -1; | |
1661 | ||
56cc2406 WL |
1662 | /* |
1663 | * We get here even with APIC virtualization enabled, if doing | |
1664 | * nested virtualization and L1 runs with the "acknowledge interrupt | |
1665 | * on exit" mode. Then we cannot inject the interrupt via RVI, | |
1666 | * because the process would deliver it through the IDT. | |
1667 | */ | |
1668 | ||
8680b94b | 1669 | apic_set_isr(vector, apic); |
97222cc8 ED |
1670 | apic_update_ppr(apic); |
1671 | apic_clear_irr(vector, apic); | |
1672 | return vector; | |
1673 | } | |
96ad2cc6 | 1674 | |
64eb0620 GN |
1675 | void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu, |
1676 | struct kvm_lapic_state *s) | |
96ad2cc6 | 1677 | { |
ad312c7c | 1678 | struct kvm_lapic *apic = vcpu->arch.apic; |
96ad2cc6 | 1679 | |
5dbc8f3f | 1680 | kvm_lapic_set_base(vcpu, vcpu->arch.apic_base); |
64eb0620 GN |
1681 | /* set SPIV separately to get count of SW disabled APICs right */ |
1682 | apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV))); | |
1683 | memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); | |
1e08ec4a GN |
1684 | /* call kvm_apic_set_id() to put apic into apic_map */ |
1685 | kvm_apic_set_id(apic, kvm_apic_id(apic)); | |
fc61b800 GN |
1686 | kvm_apic_set_version(vcpu); |
1687 | ||
96ad2cc6 | 1688 | apic_update_ppr(apic); |
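| /* re-arm the local timer from the restored divide/initial-count (or TSC-deadline) registers */ | |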
d3c7b77d | 1689 | hrtimer_cancel(&apic->lapic_timer.timer); |
96ad2cc6 ED |
1690 | update_divide_count(apic); |
1691 | start_apic_timer(apic); | |
6e24a6ef | 1692 | apic->irr_pending = true; |
c7c9c56c YZ |
1693 | apic->isr_count = kvm_apic_vid_enabled(vcpu->kvm) ? |
1694 | 1 : count_vectors(apic->regs + APIC_ISR); | |
8680b94b | 1695 | apic->highest_isr_cache = -1; |
c7c9c56c | 1696 | kvm_x86_ops->hwapic_isr_update(vcpu->kvm, apic_find_highest_isr(apic)); |
3842d135 | 1697 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
10606919 | 1698 | kvm_rtc_eoi_tracking_restore_one(vcpu); |
96ad2cc6 | 1699 | } |
a3d7f85f | 1700 | |
2f52d58c | 1701 | void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) |
a3d7f85f | 1702 | { |
a3d7f85f ED |
1703 | struct hrtimer *timer; |
1704 | ||
c48f1496 | 1705 | if (!kvm_vcpu_has_lapic(vcpu)) |
a3d7f85f ED |
1706 | return; |
1707 | ||
54e9818f | 1708 | timer = &vcpu->arch.apic->lapic_timer.timer; |
a3d7f85f | 1709 | if (hrtimer_cancel(timer)) |
beb20d52 | 1710 | hrtimer_start_expires(timer, HRTIMER_MODE_ABS); |
a3d7f85f | 1711 | } |
b93463aa | 1712 | |
ae7a2a3f MT |
1713 | /* |
1714 | * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt | |
1715 | * | |
1716 | * Detect whether guest triggered PV EOI since the | |
1717 | * last entry. If yes, set EOI on the guest's behalf. | |
1718 | * Clear PV EOI in guest memory in any case. | |
1719 | */ | |
1720 | static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu, | |
1721 | struct kvm_lapic *apic) | |
1722 | { | |
1723 | bool pending; | |
1724 | int vector; | |
1725 | /* | |
1726 | * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host | |
1727 | * and KVM_PV_EOI_ENABLED in guest memory as follows: | |
1728 | * | |
1729 | * KVM_APIC_PV_EOI_PENDING is unset: | |
1730 | * -> host disabled PV EOI. | |
1731 | * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set: | |
1732 | * -> host enabled PV EOI, guest did not execute EOI yet. | |
1733 | * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset: | |
1734 | * -> host enabled PV EOI, guest executed EOI. | |
1735 | */ | |
1736 | BUG_ON(!pv_eoi_enabled(vcpu)); | |
1737 | pending = pv_eoi_get_pending(vcpu); | |
1738 | /* | |
1739 | * Clear pending bit in any case: it will be set again on vmentry. | |
1740 | * While this might not be ideal from a performance point of view, | |
1741 | * this makes sure pv eoi is only enabled when we know it's safe. | |
1742 | */ | |
1743 | pv_eoi_clr_pending(vcpu); | |
1744 | if (pending) | |
1745 | return; | |
1746 | vector = apic_set_eoi(apic); | |
1747 | trace_kvm_pv_eoi(apic, vector); | |
1748 | } | |
1749 | ||
b93463aa AK |
1750 | void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) |
1751 | { | |
1752 | u32 data; | |
b93463aa | 1753 | |
ae7a2a3f MT |
1754 | if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) |
1755 | apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); | |
1756 | ||
41383771 | 1757 | if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) |
b93463aa AK |
1758 | return; |
1759 | ||
fda4e2e8 AH |
1760 | kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, |
1761 | sizeof(u32)); | |
b93463aa AK |
1762 | |
1763 | apic_set_tpr(vcpu->arch.apic, data & 0xff); | |
1764 | } | |
1765 | ||
ae7a2a3f MT |
1766 | /* |
1767 | * apic_sync_pv_eoi_to_guest - called before vmentry | |
1768 | * | |
1769 | * Detect whether it's safe to enable PV EOI and | |
1770 | * if yes do so. | |
1771 | */ | |
1772 | static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu, | |
1773 | struct kvm_lapic *apic) | |
1774 | { | |
1775 | if (!pv_eoi_enabled(vcpu) || | |
1776 | /* IRR set or many bits in ISR: could be nested. */ | |
1777 | apic->irr_pending || | |
1778 | /* Cache not set: could be safe but we don't bother. */ | |
1779 | apic->highest_isr_cache == -1 || | |
1780 | /* Need EOI to update ioapic. */ | |
1781 | kvm_ioapic_handles_vector(vcpu->kvm, apic->highest_isr_cache)) { | |
1782 | /* | |
1783 | * PV EOI was disabled by apic_sync_pv_eoi_from_guest | |
1784 | * so we need not do anything here. | |
1785 | */ | |
1786 | return; | |
1787 | } | |
1788 | ||
1789 | pv_eoi_set_pending(apic->vcpu); | |
1790 | } | |
1791 | ||
b93463aa AK |
1792 | void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) |
1793 | { | |
1794 | u32 data, tpr; | |
1795 | int max_irr, max_isr; | |
ae7a2a3f | 1796 | struct kvm_lapic *apic = vcpu->arch.apic; |
b93463aa | 1797 | |
ae7a2a3f MT |
1798 | apic_sync_pv_eoi_to_guest(vcpu, apic); |
1799 | ||
41383771 | 1800 | if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) |
b93463aa AK |
1801 | return; |
1802 | ||
c48f1496 | 1803 | tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff; |
b93463aa AK |
1804 | max_irr = apic_find_highest_irr(apic); |
1805 | if (max_irr < 0) | |
1806 | max_irr = 0; | |
1807 | max_isr = apic_find_highest_isr(apic); | |
1808 | if (max_isr < 0) | |
1809 | max_isr = 0; | |
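| /* pack TPR into byte 0, the in-service priority class into byte 1, and the highest pending vector into byte 3 */ | |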
1810 | data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); | |
1811 | ||
fda4e2e8 AH |
1812 | kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, |
1813 | sizeof(u32)); | |
b93463aa AK |
1814 | } |
1815 | ||
fda4e2e8 | 1816 | int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) |
b93463aa | 1817 | { |
fda4e2e8 AH |
1818 | if (vapic_addr) { |
1819 | if (kvm_gfn_to_hva_cache_init(vcpu->kvm, | |
1820 | &vcpu->arch.apic->vapic_cache, | |
1821 | vapic_addr, sizeof(u32))) | |
1822 | return -EINVAL; | |
41383771 | 1823 | __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); |
fda4e2e8 | 1824 | } else { |
41383771 | 1825 | __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); |
fda4e2e8 AH |
1826 | } |
1827 | ||
1828 | vcpu->arch.apic->vapic_addr = vapic_addr; | |
1829 | return 0; | |
b93463aa | 1830 | } |
0105d1a5 GN |
1831 | |
1832 | int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data) | |
1833 | { | |
1834 | struct kvm_lapic *apic = vcpu->arch.apic; | |
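| /* x2APIC exposes each 16-byte APIC register as an MSR starting at 0x800, so recover the MMIO-style offset */ | |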
1835 | u32 reg = (msr - APIC_BASE_MSR) << 4; | |
1836 | ||
1837 | if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic)) | |
1838 | return 1; | |
1839 | ||
1840 | /* if this is the ICR (MSR 0x830), write the high half to ICR2 before the command */ | |
1841 | if (msr == 0x830) | |
1842 | apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32)); | |
1843 | return apic_reg_write(apic, reg, (u32)data); | |
1844 | } | |
1845 | ||
1846 | int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data) | |
1847 | { | |
1848 | struct kvm_lapic *apic = vcpu->arch.apic; | |
1849 | u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0; | |
1850 | ||
1851 | if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic)) | |
1852 | return 1; | |
1853 | ||
1854 | if (apic_reg_read(apic, reg, 4, &low)) | |
1855 | return 1; | |
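| /* ICR is the only 64-bit x2APIC register; its high half is read from ICR2 */ | |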
1856 | if (msr == 0x830) | |
1857 | apic_reg_read(apic, APIC_ICR2, 4, &high); | |
1858 | ||
1859 | *data = (((u64)high) << 32) | low; | |
1860 | ||
1861 | return 0; | |
1862 | } | |
10388a07 GN |
1863 | |
1864 | int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data) | |
1865 | { | |
1866 | struct kvm_lapic *apic = vcpu->arch.apic; | |
1867 | ||
c48f1496 | 1868 | if (!kvm_vcpu_has_lapic(vcpu)) |
10388a07 GN |
1869 | return 1; |
1870 | ||
1871 | /* if this is the ICR, write the high half to ICR2 before the command */ | |
1872 | if (reg == APIC_ICR) | |
1873 | apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32)); | |
1874 | return apic_reg_write(apic, reg, (u32)data); | |
1875 | } | |
1876 | ||
1877 | int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data) | |
1878 | { | |
1879 | struct kvm_lapic *apic = vcpu->arch.apic; | |
1880 | u32 low, high = 0; | |
1881 | ||
c48f1496 | 1882 | if (!kvm_vcpu_has_lapic(vcpu)) |
10388a07 GN |
1883 | return 1; |
1884 | ||
1885 | if (apic_reg_read(apic, reg, 4, &low)) | |
1886 | return 1; | |
1887 | if (reg == APIC_ICR) | |
1888 | apic_reg_read(apic, APIC_ICR2, 4, &high); | |
1889 | ||
1890 | *data = (((u64)high) << 32) | low; | |
1891 | ||
1892 | return 0; | |
1893 | } | |
ae7a2a3f MT |
1894 | |
1895 | int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) | |
1896 | { | |
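| /* bit 0 (KVM_MSR_ENABLED) turns PV EOI on; the other bits hold the 4-byte-aligned guest address of the EOI flag */ | |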
1897 | u64 addr = data & ~KVM_MSR_ENABLED; | |
1898 | if (!IS_ALIGNED(addr, 4)) | |
1899 | return 1; | |
1900 | ||
1901 | vcpu->arch.pv_eoi.msr_val = data; | |
1902 | if (!pv_eoi_enabled(vcpu)) | |
1903 | return 0; | |
1904 | return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, | |
8f964525 | 1905 | addr, sizeof(u8)); |
ae7a2a3f | 1906 | } |
c5cc421b | 1907 | |
66450a21 JK |
1908 | void kvm_apic_accept_events(struct kvm_vcpu *vcpu) |
1909 | { | |
1910 | struct kvm_lapic *apic = vcpu->arch.apic; | |
1911 | unsigned int sipi_vector; | |
299018f4 | 1912 | unsigned long pe; |
66450a21 | 1913 | |
299018f4 | 1914 | if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events) |
66450a21 JK |
1915 | return; |
1916 | ||
299018f4 GN |
1917 | pe = xchg(&apic->pending_events, 0); |
1918 | ||
1919 | if (test_bit(KVM_APIC_INIT, &pe)) { | |
66450a21 JK |
1920 | kvm_lapic_reset(vcpu); |
1921 | kvm_vcpu_reset(vcpu); | |
1922 | if (kvm_vcpu_is_bsp(apic->vcpu)) | |
1923 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | |
1924 | else | |
1925 | vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; | |
1926 | } | |
299018f4 | 1927 | if (test_bit(KVM_APIC_SIPI, &pe) && |
66450a21 JK |
1928 | vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { |
1929 | /* evaluate pending_events before reading the vector */ | |
1930 | smp_rmb(); | |
1931 | sipi_vector = apic->sipi_vector; | |
98eff52a | 1932 | apic_debug("vcpu %d received sipi with vector # %x\n", |
66450a21 JK |
1933 | vcpu->vcpu_id, sipi_vector); |
1934 | kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector); | |
1935 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | |
1936 | } | |
1937 | } | |
1938 | ||
c5cc421b GN |
1939 | void kvm_lapic_init(void) |
1940 | { | |
1941 | /* do not patch jump label more than once per second */ | |
1942 | jump_label_rate_limit(&apic_hw_disabled, HZ); | |
f8c1ea10 | 1943 | jump_label_rate_limit(&apic_sw_disabled, HZ); |
c5cc421b | 1944 | } |