// SPDX-License-Identifier: GPL-2.0-only

/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "ioapic.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#include "hyperv.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define PRId64 "d"
#define PRIx64 "llx"
#define PRIu64 "u"
#define PRIo64 "o"

/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION                    (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH               (1 << 12)
/* The following defines are not in apicdef.h. */
#define MAX_APIC_VECTOR                 256
#define APIC_VECTORS_PER_REG            32

static bool lapic_timer_advance_dynamic __read_mostly;
#define LAPIC_TIMER_ADVANCE_ADJUST_MIN  100     /* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX  10000   /* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT     1000
#define LAPIC_TIMER_ADVANCE_NS_MAX      5000
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8

static inline int apic_test_vector(int vec, void *bitmap)
{
        return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return apic_test_vector(vector, apic->regs + APIC_ISR) ||
                apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
        return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
        return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);

static inline int apic_enabled(struct kvm_lapic *apic)
{
        return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK        \
        (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK       \
        (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
         APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
        return apic->vcpu->vcpu_id;
}

static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
{
        return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
}

bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
{
        return kvm_x86_ops.set_hv_timer
               && !(kvm_mwait_in_guest(vcpu->kvm) ||
                    kvm_can_post_timer_interrupt(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_can_use_hv_timer);

static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
{
        return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
}

static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
                u32 dest_id, struct kvm_lapic ***cluster, u16 *mask)
{
        switch (map->mode) {
        case KVM_APIC_MODE_X2APIC: {
                u32 offset = (dest_id >> 16) * 16;
                u32 max_apic_id = map->max_apic_id;

                if (offset <= max_apic_id) {
                        u8 cluster_size = min(max_apic_id - offset + 1, 16U);

                        offset = array_index_nospec(offset, map->max_apic_id + 1);
                        *cluster = &map->phys_map[offset];
                        *mask = dest_id & (0xffff >> (16 - cluster_size));
                } else {
                        *mask = 0;
                }

                return true;
                }
        case KVM_APIC_MODE_XAPIC_FLAT:
                *cluster = map->xapic_flat_map;
                *mask = dest_id & 0xff;
                return true;
        case KVM_APIC_MODE_XAPIC_CLUSTER:
                *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
                *mask = dest_id & 0xf;
                return true;
        default:
                /* Not optimized. */
                return false;
        }
}
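
/*
 * Worked example for the x2APIC case above (illustrative, not from the
 * original source): logical dest_id 0x00030028 selects cluster 3, so
 * offset = 3 * 16 = 48 and *cluster points at phys_map[48..63]; the low
 * half-word 0x0028 becomes the in-cluster bitmask, i.e. the CPUs at
 * positions 3 and 5 within that cluster.
 */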

static void kvm_apic_map_free(struct rcu_head *rcu)
{
        struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);

        kvfree(map);
}

/*
 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
 *
 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
 * apic_map_lock held.
 */
enum {
        CLEAN,
        UPDATE_IN_PROGRESS,
        DIRTY
};
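
/*
 * Illustrative lifecycle, inferred from the code below: a vCPU that writes
 * an addressing register (APIC_ID, APIC_LDR, APIC_DFR, ...) marks the map
 * DIRTY with atomic_set_release(); kvm_recalculate_apic_map() then moves
 * DIRTY -> UPDATE_IN_PROGRESS under apic_map_lock, rebuilds the map, and
 * ends with cmpxchg_release(UPDATE_IN_PROGRESS, CLEAN), which deliberately
 * leaves the state DIRTY if a concurrent writer re-dirtied it meanwhile.
 */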

void kvm_recalculate_apic_map(struct kvm *kvm)
{
        struct kvm_apic_map *new, *old = NULL;
        struct kvm_vcpu *vcpu;
        int i;
        u32 max_id = 255; /* enough space for any xAPIC ID */

        /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map.  */
        if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
                return;

        mutex_lock(&kvm->arch.apic_map_lock);
        /*
         * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
         * (if clean) or the APIC registers (if dirty).
         */
        if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
                                   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
                /* Someone else has updated the map. */
                mutex_unlock(&kvm->arch.apic_map_lock);
                return;
        }

        kvm_for_each_vcpu(i, vcpu, kvm)
                if (kvm_apic_present(vcpu))
                        max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));

        new = kvzalloc(sizeof(struct kvm_apic_map) +
                           sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
                           GFP_KERNEL_ACCOUNT);

        if (!new)
                goto out;

        new->max_apic_id = max_id;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                struct kvm_lapic *apic = vcpu->arch.apic;
                struct kvm_lapic **cluster;
                u16 mask;
                u32 ldr;
                u8 xapic_id;
                u32 x2apic_id;

                if (!kvm_apic_present(vcpu))
                        continue;

                xapic_id = kvm_xapic_id(apic);
                x2apic_id = kvm_x2apic_id(apic);

                /* Hotplug hack: see kvm_apic_match_physical_addr(), ... */
                if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
                                x2apic_id <= new->max_apic_id)
                        new->phys_map[x2apic_id] = apic;
                /*
                 * ... xAPIC ID of VCPUs with APIC ID > 0xff will wrap-around,
                 * prevent them from masking VCPUs with APIC ID <= 0xff.
                 */
                if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
                        new->phys_map[xapic_id] = apic;

                if (!kvm_apic_sw_enabled(apic))
                        continue;

                ldr = kvm_lapic_get_reg(apic, APIC_LDR);

                if (apic_x2apic_mode(apic)) {
                        new->mode |= KVM_APIC_MODE_X2APIC;
                } else if (ldr) {
                        ldr = GET_APIC_LOGICAL_ID(ldr);
                        if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
                                new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
                        else
                                new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
                }

                if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
                        continue;

                if (mask)
                        cluster[ffs(mask) - 1] = apic;
        }
out:
        old = rcu_dereference_protected(kvm->arch.apic_map,
                        lockdep_is_held(&kvm->arch.apic_map_lock));
        rcu_assign_pointer(kvm->arch.apic_map, new);
        /*
         * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
         * If another update has come in, leave it DIRTY.
         */
        atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
                               UPDATE_IN_PROGRESS, CLEAN);
        mutex_unlock(&kvm->arch.apic_map_lock);

        if (old)
                call_rcu(&old->rcu, kvm_apic_map_free);

        kvm_make_scan_ioapic_request(kvm);
}

static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
        bool enabled = val & APIC_SPIV_APIC_ENABLED;

        kvm_lapic_set_reg(apic, APIC_SPIV, val);

        if (enabled != apic->sw_enabled) {
                apic->sw_enabled = enabled;
                if (enabled)
                        static_branch_slow_dec_deferred(&apic_sw_disabled);
                else
                        static_branch_inc(&apic_sw_disabled.key);

                atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
        }
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
        kvm_lapic_set_reg(apic, APIC_ID, id << 24);
        atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
        kvm_lapic_set_reg(apic, APIC_LDR, id);
        atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
{
        kvm_lapic_set_reg(apic, APIC_DFR, val);
        atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{
        return ((id >> 4) << 16) | (1 << (id & 0xf));
}
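
/*
 * Worked example (illustrative): for x2APIC ID 0x26,
 *
 *   kvm_apic_calc_x2apic_ldr(0x26) == ((0x26 >> 4) << 16) | (1 << (0x26 & 0xf))
 *                                  == (2 << 16) | (1 << 6) == 0x00020040,
 *
 * i.e. cluster 2 with bit 6 set, matching the x2APIC LDR layout of a 16-bit
 * cluster ID above a 16-bit in-cluster bitmask.
 */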

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
        u32 ldr = kvm_apic_calc_x2apic_ldr(id);

        WARN_ON_ONCE(id != apic->vcpu->vcpu_id);

        kvm_lapic_set_reg(apic, APIC_ID, id);
        kvm_lapic_set_reg(apic, APIC_LDR, ldr);
        atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
        return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
        return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u32 v = APIC_VERSION;

        if (!lapic_in_kernel(vcpu))
                return;

        /*
         * KVM emulates the 82093AA datasheet (with the in-kernel IOAPIC
         * implementation), which doesn't have an EOI register.  Some buggy
         * OSes (e.g. Windows with the Hyper-V role) disable EOI broadcast in
         * the LAPIC without checking the IOAPIC version first, so
         * level-triggered interrupts would never get EOIed in the IOAPIC.
         */
        if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
            !ioapic_in_kernel(vcpu->kvm))
                v |= APIC_LVR_DIRECTED_EOI;
        kvm_lapic_set_reg(apic, APIC_LVR, v);
}

static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
        LVT_MASK,       /* part LVTT mask, timer mode mask added at runtime */
        LVT_MASK | APIC_MODE_MASK,      /* LVTTHMR */
        LVT_MASK | APIC_MODE_MASK,      /* LVTPC */
        LINT_MASK, LINT_MASK,   /* LVT0-1 */
        LVT_MASK                /* LVTERR */
};

static int find_highest_vector(void *bitmap)
{
        int vec;
        u32 *reg;

        for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
             vec >= 0; vec -= APIC_VECTORS_PER_REG) {
                reg = bitmap + REG_POS(vec);
                if (*reg)
                        return __fls(*reg) + vec;
        }

        return -1;
}
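
/*
 * Illustrative walk-through (not in the original source): the 256 vectors
 * live in eight 32-bit registers, so the scan starts at vec = 224 (the
 * register covering vectors 224-255) and steps down by 32.  If only vector
 * 49 is set, the higher registers are all zero and the register covering
 * vectors 32-63 yields __fls(1 << 17) + 32 = 17 + 32 = 49.
 */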

static u8 count_vectors(void *bitmap)
{
        int vec;
        u32 *reg;
        u8 count = 0;

        for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
                reg = bitmap + REG_POS(vec);
                count += hweight32(*reg);
        }

        return count;
}

bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{
        u32 i, vec;
        u32 pir_val, irr_val, prev_irr_val;
        int max_updated_irr;

        max_updated_irr = -1;
        *max_irr = -1;

        for (i = vec = 0; i <= 7; i++, vec += 32) {
                pir_val = READ_ONCE(pir[i]);
                irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
                if (pir_val) {
                        prev_irr_val = irr_val;
                        irr_val |= xchg(&pir[i], 0);
                        *((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
                        if (prev_irr_val != irr_val) {
                                max_updated_irr =
                                        __fls(irr_val ^ prev_irr_val) + vec;
                        }
                }
                if (irr_val)
                        *max_irr = __fls(irr_val) + vec;
        }

        return ((max_updated_irr != -1) &&
                (max_updated_irr == *max_irr));
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);

bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return __kvm_apic_update_irr(pir, apic->regs, max_irr);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
        return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
        int result;

        /*
         * Note that irr_pending is just a hint. It will always be
         * true with virtual interrupt delivery enabled.
         */
        if (!apic->irr_pending)
                return -1;

        result = apic_search_irr(apic);
        ASSERT(result == -1 || result >= 16);

        return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu;

        vcpu = apic->vcpu;

        if (unlikely(vcpu->arch.apicv_active)) {
                /* need to update RVI */
                kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
                static_call(kvm_x86_hwapic_irr_update)(vcpu,
                                apic_find_highest_irr(apic));
        } else {
                apic->irr_pending = false;
                kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
                if (apic_search_irr(apic) != -1)
                        apic->irr_pending = true;
        }
}

void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
{
        apic_clear_irr(vec, vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu;

        if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
                return;

        vcpu = apic->vcpu;

        /*
         * With APIC virtualization enabled, all caching is disabled
         * because the processor can modify ISR under the hood.  Instead
         * just set SVI.
         */
        if (unlikely(vcpu->arch.apicv_active))
                static_call(kvm_x86_hwapic_isr_update)(vcpu, vec);
        else {
                ++apic->isr_count;
                BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
                /*
                 * The ISR (in-service register) bit is set when an interrupt
                 * is injected, and it is always the highest-priority vector
                 * that gets injected.  Thus the most recently set bit matches
                 * the highest bit in the ISR.
                 */
                apic->highest_isr_cache = vec;
        }
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
        int result;

        /*
         * Note that isr_count is always 1, and highest_isr_cache
         * is always -1, with APIC virtualization enabled.
         */
        if (!apic->isr_count)
                return -1;
        if (likely(apic->highest_isr_cache != -1))
                return apic->highest_isr_cache;

        result = find_highest_vector(apic->regs + APIC_ISR);
        ASSERT(result == -1 || result >= 16);

        return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
        struct kvm_vcpu *vcpu;

        if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
                return;

        vcpu = apic->vcpu;

        /*
         * We do get here with APIC virtualization enabled if the guest
         * uses the Hyper-V APIC enlightenment.  In this case we may need
         * to trigger a new interrupt delivery by writing the SVI field;
         * on the other hand isr_count and highest_isr_cache are unused
         * and must be left alone.
         */
        if (unlikely(vcpu->arch.apicv_active))
                static_call(kvm_x86_hwapic_isr_update)(vcpu,
                                                apic_find_highest_isr(apic));
        else {
                --apic->isr_count;
                BUG_ON(apic->isr_count < 0);
                apic->highest_isr_cache = -1;
        }
}

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
        /*
         * This may race with setting of irr in __apic_accept_irq() and the
         * value returned may be wrong, but kvm_vcpu_kick() in
         * __apic_accept_irq() will cause an immediate vmexit and the value
         * will be recalculated on the next vmentry.
         */
        return apic_find_highest_irr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                             int vector, int level, int trig_mode,
                             struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
                     struct dest_map *dest_map)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
                        irq->level, irq->trig_mode, dest_map);
}

static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
                         struct kvm_lapic_irq *irq, u32 min)
{
        int i, count = 0;
        struct kvm_vcpu *vcpu;

        if (min > map->max_apic_id)
                return 0;

        for_each_set_bit(i, ipi_bitmap,
                min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
                if (map->phys_map[min + i]) {
                        vcpu = map->phys_map[min + i]->vcpu;
                        count += kvm_apic_set_irq(vcpu, irq, NULL);
                }
        }

        return count;
}

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
                    unsigned long ipi_bitmap_high, u32 min,
                    unsigned long icr, int op_64_bit)
{
        struct kvm_apic_map *map;
        struct kvm_lapic_irq irq = {0};
        int cluster_size = op_64_bit ? 64 : 32;
        int count;

        if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
                return -KVM_EINVAL;

        irq.vector = icr & APIC_VECTOR_MASK;
        irq.delivery_mode = icr & APIC_MODE_MASK;
        irq.level = (icr & APIC_INT_ASSERT) != 0;
        irq.trig_mode = icr & APIC_INT_LEVELTRIG;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        count = -EOPNOTSUPP;
        if (likely(map)) {
                count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
                min += cluster_size;
                count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
        }

        rcu_read_unlock();
        return count;
}

static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
        return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
                                      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
        return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
                                      sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
        u8 val;

        if (pv_eoi_get_user(vcpu, &val) < 0) {
                printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
                           (unsigned long long)vcpu->arch.pv_eoi.msr_val);
                return false;
        }
        return val & KVM_PV_EOI_ENABLED;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
        if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
                printk(KERN_WARNING "Can't set EOI MSR value: 0x%llx\n",
                           (unsigned long long)vcpu->arch.pv_eoi.msr_val);
                return;
        }
        __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
        if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
                printk(KERN_WARNING "Can't clear EOI MSR value: 0x%llx\n",
                           (unsigned long long)vcpu->arch.pv_eoi.msr_val);
                return;
        }
        __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
        int highest_irr;

        if (apic->vcpu->arch.apicv_active)
                highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
        else
                highest_irr = apic_find_highest_irr(apic);
        if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
                return -1;
        return highest_irr;
}

static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
{
        u32 tpr, isrv, ppr, old_ppr;
        int isr;

        old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
        tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
        isr = apic_find_highest_isr(apic);
        isrv = (isr != -1) ? isr : 0;

        if ((tpr & 0xf0) >= (isrv & 0xf0))
                ppr = tpr & 0xff;
        else
                ppr = isrv & 0xf0;

        *new_ppr = ppr;
        if (old_ppr != ppr)
                kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);

        return ppr < old_ppr;
}
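
/*
 * Worked PPR example (illustrative): with TPR = 0x30 and highest in-service
 * vector 0x51, the ISR class (0x50) beats the TPR class (0x30), so PPR
 * becomes 0x50; with TPR = 0x62 and the same ISR, the TPR class wins and
 * PPR is the full TPR value 0x62, exactly as computed above.
 */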

static void apic_update_ppr(struct kvm_lapic *apic)
{
        u32 ppr;

        if (__apic_update_ppr(apic, &ppr) &&
            apic_has_interrupt_for_ppr(apic, ppr) != -1)
                kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}

void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{
        apic_update_ppr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
        kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
        apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
        return mda == (apic_x2apic_mode(apic) ?
                        X2APIC_BROADCAST : APIC_BROADCAST);
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
        if (kvm_apic_broadcast(apic, mda))
                return true;

        if (apic_x2apic_mode(apic))
                return mda == kvm_x2apic_id(apic);

        /*
         * Hotplug hack: Make LAPIC in xAPIC mode also accept interrupts as if
         * it were in x2APIC mode.  Hotplugged VCPUs start in xAPIC mode and
         * this allows unique addressing of VCPUs with APIC ID over 0xff.
         * The 0xff condition is needed because the xAPIC ID is writeable.
         */
        if (kvm_x2apic_id(apic) > 0xff && mda == kvm_x2apic_id(apic))
                return true;

        return mda == kvm_xapic_id(apic);
}

static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
        u32 logical_id;

        if (kvm_apic_broadcast(apic, mda))
                return true;

        logical_id = kvm_lapic_get_reg(apic, APIC_LDR);

        if (apic_x2apic_mode(apic))
                return ((logical_id >> 16) == (mda >> 16))
                       && (logical_id & mda & 0xffff) != 0;

        logical_id = GET_APIC_LOGICAL_ID(logical_id);

        switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
        case APIC_DFR_FLAT:
                return (logical_id & mda) != 0;
        case APIC_DFR_CLUSTER:
                return ((logical_id >> 4) == (mda >> 4))
                       && (logical_id & mda & 0xf) != 0;
        default:
                return false;
        }
}

/* The KVM local APIC implementation has two quirks:
 *
 *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
 *    in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 *    KVM doesn't do that aliasing.
 *
 *  - in-kernel IOAPIC messages have to be delivered directly to
 *    x2APIC, because the kernel does not support interrupt remapping.
 *    In order to support broadcast without interrupt remapping, x2APIC
 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
 *    to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */
static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
                struct kvm_lapic *source, struct kvm_lapic *target)
{
        bool ipi = source != NULL;

        if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
            !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
                return X2APIC_BROADCAST;

        return dest_id;
}
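
/*
 * Example of the broadcast rewrite above (illustrative, with hypothetical
 * values):
 *
 *   // IOAPIC-originated message, so source == NULL and ipi == false:
 *   u32 mda = kvm_apic_mda(vcpu, APIC_BROADCAST, NULL, target);
 *   // mda == X2APIC_BROADCAST if target is in x2APIC mode and the quirk
 *   // is enabled; with KVM_CAP_X2APIC_API the 0xff is kept and may be
 *   // routed as logical "cluster 0, CPUs 0-7" instead.
 */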

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
                           int shorthand, unsigned int dest, int dest_mode)
{
        struct kvm_lapic *target = vcpu->arch.apic;
        u32 mda = kvm_apic_mda(vcpu, dest, source, target);

        ASSERT(target);
        switch (shorthand) {
        case APIC_DEST_NOSHORT:
                if (dest_mode == APIC_DEST_PHYSICAL)
                        return kvm_apic_match_physical_addr(target, mda);
                else
                        return kvm_apic_match_logical_addr(target, mda);
        case APIC_DEST_SELF:
                return target == source;
        case APIC_DEST_ALLINC:
                return true;
        case APIC_DEST_ALLBUT:
                return target != source;
        default:
                return false;
        }
}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);

int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
                       const unsigned long *bitmap, u32 bitmap_size)
{
        u32 mod;
        int i, idx = -1;

        mod = vector % dest_vcpus;

        for (i = 0; i <= mod; i++) {
                idx = find_next_bit(bitmap, bitmap_size, idx + 1);
                BUG_ON(idx == bitmap_size);
        }

        return idx;
}
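
/*
 * Vector-hashing example (illustrative): for vector 38 and three candidate
 * destinations at bit positions 1, 4 and 9 of *bitmap, mod = 38 % 3 = 2,
 * so the loop advances to the third set bit and returns index 9.  A given
 * vector therefore always hashes to the same destination for a fixed
 * bitmap, which is the property lowest-priority delivery relies on below.
 */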

static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
        if (!kvm->arch.disabled_lapic_found) {
                kvm->arch.disabled_lapic_found = true;
                printk(KERN_INFO
                       "Disabled LAPIC found during irq injection\n");
        }
}

static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
                struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
        if (kvm->arch.x2apic_broadcast_quirk_disabled) {
                if ((irq->dest_id == APIC_BROADCAST &&
                                map->mode != KVM_APIC_MODE_X2APIC))
                        return true;
                if (irq->dest_id == X2APIC_BROADCAST)
                        return true;
        } else {
                bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);

                if (irq->dest_id == (x2apic_ipi ?
                                     X2APIC_BROADCAST : APIC_BROADCAST))
                        return true;
        }

        return false;
}

/* Return true if the interrupt can be handled by using *bitmap as an index
 * mask for valid destinations in the *dst array.
 * Return false if kvm_apic_map_get_dest_lapic() did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped.  In this case, *bitmap would
 * be zero and *dst undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
                struct kvm_lapic **src, struct kvm_lapic_irq *irq,
                struct kvm_apic_map *map, struct kvm_lapic ***dst,
                unsigned long *bitmap)
{
        int i, lowest;

        if (irq->shorthand == APIC_DEST_SELF && src) {
                *dst = src;
                *bitmap = 1;
                return true;
        } else if (irq->shorthand)
                return false;

        if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
                return false;

        if (irq->dest_mode == APIC_DEST_PHYSICAL) {
                if (irq->dest_id > map->max_apic_id) {
                        *bitmap = 0;
                } else {
                        u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);

                        *dst = &map->phys_map[dest_id];
                        *bitmap = 1;
                }
                return true;
        }

        *bitmap = 0;
        if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
                                (u16 *)bitmap))
                return false;

        if (!kvm_lowest_prio_delivery(irq))
                return true;

        if (!kvm_vector_hashing_enabled()) {
                lowest = -1;
                for_each_set_bit(i, bitmap, 16) {
                        if (!(*dst)[i])
                                continue;
                        if (lowest < 0)
                                lowest = i;
                        else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
                                                (*dst)[lowest]->vcpu) < 0)
                                lowest = i;
                }
        } else {
                if (!*bitmap)
                        return true;

                lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
                                bitmap, 16);

                if (!(*dst)[lowest]) {
                        kvm_apic_disabled_lapic_found(kvm);
                        *bitmap = 0;
                        return true;
                }
        }

        *bitmap = (lowest >= 0) ? 1 << lowest : 0;

        return true;
}

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
                struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
        struct kvm_apic_map *map;
        unsigned long bitmap;
        struct kvm_lapic **dst = NULL;
        int i;
        bool ret;

        *r = -1;

        if (irq->shorthand == APIC_DEST_SELF) {
                *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
                return true;
        }

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
        if (ret) {
                *r = 0;
                for_each_set_bit(i, &bitmap, 16) {
                        if (!dst[i])
                                continue;
                        *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
                }
        }

        rcu_read_unlock();
        return ret;
}

/*
 * This routine tries to handle interrupts in posted mode.  Here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode.
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU:
 *      1. For lowest-priority interrupts, store all the possible
 *         destination vCPUs in an array.
 *      2. Use "guest vector % max number of destination vCPUs" to find
 *         the right destination vCPU in the array for the lowest-priority
 *         interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
                        struct kvm_vcpu **dest_vcpu)
{
        struct kvm_apic_map *map;
        unsigned long bitmap;
        struct kvm_lapic **dst = NULL;
        bool ret = false;

        if (irq->shorthand)
                return false;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
                        hweight16(bitmap) == 1) {
                unsigned long i = find_first_bit(&bitmap, 16);

                if (dst[i]) {
                        *dest_vcpu = dst[i]->vcpu;
                        ret = true;
                }
        }

        rcu_read_unlock();
        return ret;
}

/*
 * Add a pending IRQ into the LAPIC.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                             int vector, int level, int trig_mode,
                             struct dest_map *dest_map)
{
        int result = 0;
        struct kvm_vcpu *vcpu = apic->vcpu;

        trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
                                  trig_mode, vector);
        switch (delivery_mode) {
        case APIC_DM_LOWEST:
                vcpu->arch.apic_arb_prio++;
                fallthrough;
        case APIC_DM_FIXED:
                if (unlikely(trig_mode && !level))
                        break;

                /* FIXME add logic for vcpu on reset */
                if (unlikely(!apic_enabled(apic)))
                        break;

                result = 1;

                if (dest_map) {
                        __set_bit(vcpu->vcpu_id, dest_map->map);
                        dest_map->vectors[vcpu->vcpu_id] = vector;
                }

                if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
                        if (trig_mode)
                                kvm_lapic_set_vector(vector,
                                                     apic->regs + APIC_TMR);
                        else
                                kvm_lapic_clear_vector(vector,
                                                       apic->regs + APIC_TMR);
                }

                if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) {
                        kvm_lapic_set_irr(vector, apic);
                        kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                break;

        case APIC_DM_REMRD:
                result = 1;
                vcpu->arch.pv.pv_unhalted = 1;
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_SMI:
                result = 1;
                kvm_make_request(KVM_REQ_SMI, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_NMI:
                result = 1;
                kvm_inject_nmi(vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_INIT:
                if (!trig_mode || level) {
                        result = 1;
                        /* assumes that there are only KVM_APIC_INIT/SIPI */
                        apic->pending_events = (1UL << KVM_APIC_INIT);
                        kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                break;

        case APIC_DM_STARTUP:
                result = 1;
                apic->sipi_vector = vector;
                /* make sure sipi_vector is visible to the receiver */
                smp_wmb();
                set_bit(KVM_APIC_SIPI, &apic->pending_events);
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_EXTINT:
                /*
                 * Should only be called by kvm_apic_local_deliver() with LVT0,
                 * before NMI watchdog was enabled. Already handled by
                 * kvm_apic_accept_pic_intr().
                 */
                break;

        default:
                printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
                       delivery_mode);
                break;
        }
        return result;
}

/*
 * This routine identifies the destination vCPUs meant to receive an
 * IOAPIC interrupt.  It either uses kvm_apic_map_get_dest_lapic() to find
 * the array of destination vCPUs and set the bitmap, or it traverses each
 * available vCPU and matches the destination individually.
 */
void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
                              unsigned long *vcpu_bitmap)
{
        struct kvm_lapic **dest_vcpu = NULL;
        struct kvm_lapic *src = NULL;
        struct kvm_apic_map *map;
        struct kvm_vcpu *vcpu;
        unsigned long bitmap;
        int i, vcpu_idx;
        bool ret;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
                                          &bitmap);
        if (ret) {
                for_each_set_bit(i, &bitmap, 16) {
                        if (!dest_vcpu[i])
                                continue;
                        vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
                        __set_bit(vcpu_idx, vcpu_bitmap);
                }
        } else {
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        if (!kvm_apic_present(vcpu))
                                continue;
                        if (!kvm_apic_match_dest(vcpu, NULL,
                                                 irq->shorthand,
                                                 irq->dest_id,
                                                 irq->dest_mode))
                                continue;
                        __set_bit(i, vcpu_bitmap);
                }
        }
        rcu_read_unlock();
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
        return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
        return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
        int trigger_mode;

        /* Forward the EOI to the IOAPIC only if the IOAPIC handles the vector. */
        if (!kvm_ioapic_handles_vector(apic, vector))
                return;

        /* Request a KVM exit to inform the userspace IOAPIC. */
        if (irqchip_split(apic->vcpu->kvm)) {
                apic->vcpu->arch.pending_ioapic_eoi = vector;
                kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
                return;
        }

        if (apic_test_vector(vector, apic->regs + APIC_TMR))
                trigger_mode = IOAPIC_LEVEL_TRIG;
        else
                trigger_mode = IOAPIC_EDGE_TRIG;

        kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}

static int apic_set_eoi(struct kvm_lapic *apic)
{
        int vector = apic_find_highest_isr(apic);

        trace_kvm_eoi(apic, vector);

        /*
         * Not every EOI write has a corresponding ISR bit; one example is
         * when the kernel checks the timer in setup_IO_APIC().
         */
        if (vector == -1)
                return vector;

        apic_clear_isr(vector, apic);
        apic_update_ppr(apic);

        if (to_hv_vcpu(apic->vcpu) &&
            test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
                kvm_hv_synic_send_eoi(apic->vcpu, vector);

        kvm_ioapic_send_eoi(apic, vector);
        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
        return vector;
}

/*
 * This interface assumes a trap-like exit, which has already finished the
 * desired side effects, including vISR and vPPR updates.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        trace_kvm_eoi(apic, vector);

        kvm_ioapic_send_eoi(apic, vector);
        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);

void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
{
        struct kvm_lapic_irq irq;

        irq.vector = icr_low & APIC_VECTOR_MASK;
        irq.delivery_mode = icr_low & APIC_MODE_MASK;
        irq.dest_mode = icr_low & APIC_DEST_MASK;
        irq.level = (icr_low & APIC_INT_ASSERT) != 0;
        irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
        irq.shorthand = icr_low & APIC_SHORT_MASK;
        irq.msi_redir_hint = false;
        if (apic_x2apic_mode(apic))
                irq.dest_id = icr_high;
        else
                irq.dest_id = GET_APIC_DEST_FIELD(icr_high);

        trace_kvm_apic_ipi(icr_low, irq.dest_id);

        kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}

static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
        ktime_t remaining, now;
        s64 ns;
        u32 tmcct;

        ASSERT(apic != NULL);

        /* if initial count is 0, current count should also be 0 */
        if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
                apic->lapic_timer.period == 0)
                return 0;

        now = ktime_get();
        remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
        if (ktime_to_ns(remaining) < 0)
                remaining = 0;

        ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
        tmcct = div64_u64(ns,
                         (APIC_BUS_CYCLE_NS * apic->divide_count));

        return tmcct;
}
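
/*
 * Worked TMCCT example (illustrative, assuming APIC_BUS_CYCLE_NS == 1 as
 * defined by KVM): with a divide count of 16 and 32,000 ns remaining in the
 * current period, the current-count register reads 32000 / (1 * 16) = 2000
 * ticks, i.e. the remaining time converted back into bus-clock ticks.
 */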

static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
        struct kvm_vcpu *vcpu = apic->vcpu;
        struct kvm_run *run = vcpu->run;

        kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
        run->tpr_access.rip = kvm_rip_read(vcpu);
        run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
        if (apic->vcpu->arch.tpr_access_reporting)
                __report_tpr_access(apic, write);
}

static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
        u32 val = 0;

        if (offset >= LAPIC_MMIO_LENGTH)
                return 0;

        switch (offset) {
        case APIC_ARBPRI:
                break;

        case APIC_TMCCT:        /* Timer CCR */
                if (apic_lvtt_tscdeadline(apic))
                        return 0;

                val = apic_get_tmcct(apic);
                break;
        case APIC_PROCPRI:
                apic_update_ppr(apic);
                val = kvm_lapic_get_reg(apic, offset);
                break;
        case APIC_TASKPRI:
                report_tpr_access(apic, false);
                fallthrough;
        default:
                val = kvm_lapic_get_reg(apic, offset);
                break;
        }

        return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_lapic, dev);
}

#define APIC_REG_MASK(reg)      (1ull << ((reg) >> 4))
#define APIC_REGS_MASK(first, count) \
        (APIC_REG_MASK(first) * ((1ull << (count)) - 1))
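
/*
 * Illustrative expansion (not from the original source): registers sit at
 * 16-byte offsets, so APIC_REG_MASK(APIC_TASKPRI) with APIC_TASKPRI == 0x80
 * is 1ull << 8, while APIC_REGS_MASK(APIC_ISR, 8) multiplies the single ISR
 * bit (1ull << 16) by 0xff to cover the eight consecutive ISR registers.
 */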

int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
                void *data)
{
        unsigned char alignment = offset & 0xf;
        u32 result;
        /* this bitmask has a bit cleared for each reserved register */
        u64 valid_reg_mask =
                APIC_REG_MASK(APIC_ID) |
                APIC_REG_MASK(APIC_LVR) |
                APIC_REG_MASK(APIC_TASKPRI) |
                APIC_REG_MASK(APIC_PROCPRI) |
                APIC_REG_MASK(APIC_LDR) |
                APIC_REG_MASK(APIC_DFR) |
                APIC_REG_MASK(APIC_SPIV) |
                APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
                APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
                APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
                APIC_REG_MASK(APIC_ESR) |
                APIC_REG_MASK(APIC_ICR) |
                APIC_REG_MASK(APIC_ICR2) |
                APIC_REG_MASK(APIC_LVTT) |
                APIC_REG_MASK(APIC_LVTTHMR) |
                APIC_REG_MASK(APIC_LVTPC) |
                APIC_REG_MASK(APIC_LVT0) |
                APIC_REG_MASK(APIC_LVT1) |
                APIC_REG_MASK(APIC_LVTERR) |
                APIC_REG_MASK(APIC_TMICT) |
                APIC_REG_MASK(APIC_TMCCT) |
                APIC_REG_MASK(APIC_TDCR);

        /* ARBPRI is not valid on x2APIC */
        if (!apic_x2apic_mode(apic))
                valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);

        if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
                return 1;

        result = __apic_read(apic, offset & ~0xf);

        trace_kvm_apic_read(offset, result);

        switch (len) {
        case 1:
        case 2:
        case 4:
                memcpy(data, (char *)&result + alignment, len);
                break;
        default:
                printk(KERN_ERR "Local APIC read with len = %x, "
                       "should be 1, 2, or 4 instead\n", len);
                break;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);

static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
        return addr >= apic->base_address &&
                addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                           gpa_t address, int len, void *data)
{
        struct kvm_lapic *apic = to_lapic(this);
        u32 offset = address - apic->base_address;

        if (!apic_mmio_in_range(apic, address))
                return -EOPNOTSUPP;

        if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
                if (!kvm_check_has_quirk(vcpu->kvm,
                                         KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
                        return -EOPNOTSUPP;

                memset(data, 0xff, len);
                return 0;
        }

        kvm_lapic_reg_read(apic, offset, len, data);

        return 0;
}

static void update_divide_count(struct kvm_lapic *apic)
{
        u32 tmp1, tmp2, tdcr;

        tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
        tmp1 = tdcr & 0xf;
        tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
        apic->divide_count = 0x1 << (tmp2 & 0x7);
}
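
/*
 * TDCR worked examples (illustrative): the divide value packs its three
 * bits as [3],[1:0].  For tdcr = 0x0 (000b), tmp2 = 1 and divide_count =
 * 1 << 1 = 2 (divide by 2); for tdcr = 0xb (1011b), tmp2 = (0x3 | 0x4) + 1
 * = 8 and the shift wraps to 1 << (8 & 7) = 1, i.e. divide by 1.
 */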

static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
{
        /*
         * Do not allow the guest to program periodic timers with a small
         * interval, since the hrtimers are not throttled by the host
         * scheduler.
         */
        if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
                s64 min_period = min_timer_period_us * 1000LL;

                if (apic->lapic_timer.period < min_period) {
                        pr_info_ratelimited(
                            "kvm: vcpu %i: requested %lld ns "
                            "lapic timer period limited to %lld ns\n",
                            apic->vcpu->vcpu_id,
                            apic->lapic_timer.period, min_period);
                        apic->lapic_timer.period = min_period;
                }
        }
}

static void cancel_hv_timer(struct kvm_lapic *apic);

static void apic_update_lvtt(struct kvm_lapic *apic)
{
        u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
                        apic->lapic_timer.timer_mode_mask;

        if (apic->lapic_timer.timer_mode != timer_mode) {
                if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
                                APIC_LVT_TIMER_TSCDEADLINE)) {
                        hrtimer_cancel(&apic->lapic_timer.timer);
                        preempt_disable();
                        if (apic->lapic_timer.hv_timer_in_use)
                                cancel_hv_timer(apic);
                        preempt_enable();
                        kvm_lapic_set_reg(apic, APIC_TMICT, 0);
                        apic->lapic_timer.period = 0;
                        apic->lapic_timer.tscdeadline = 0;
                }
                apic->lapic_timer.timer_mode = timer_mode;
                limit_periodic_timer_frequency(apic);
        }
}

/*
 * On APICv, this test will cause a busy wait
 * during a higher-priority task.
 */
static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);

        if (kvm_apic_hw_enabled(apic)) {
                int vec = reg & APIC_VECTOR_MASK;
                void *bitmap = apic->regs + APIC_ISR;

                if (vcpu->arch.apicv_active)
                        bitmap = apic->regs + APIC_IRR;

                if (apic_test_vector(vec, bitmap))
                        return true;
        }
        return false;
}

static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
{
        u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;

        /*
         * If the guest TSC is running at a different ratio than the host, then
         * convert the delay to nanoseconds to achieve an accurate delay.  Note
         * that __delay() uses delay_tsc whenever the hardware has TSC, thus
         * always for VMX enabled hardware.
         */
        if (vcpu->arch.tsc_scaling_ratio == kvm_default_tsc_scaling_ratio) {
                __delay(min(guest_cycles,
                        nsec_to_cycles(vcpu, timer_advance_ns)));
        } else {
                u64 delay_ns = guest_cycles * 1000000ULL;

                do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
                ndelay(min_t(u32, delay_ns, timer_advance_ns));
        }
}
1557
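/*
 * advance_expire_delta is in guest TSC cycles: negative means the busy
 * wait ended early, positive means the interrupt arrived late.  Each
 * call nudges timer_advance_ns by 1/LAPIC_TIMER_ADVANCE_ADJUST_STEP
 * (1/8) of the error converted to nanoseconds, e.g. a delta of +4,000
 * cycles at virtual_tsc_khz == 2,000,000 is 2,000 ns, growing
 * timer_advance_ns by 250 ns.
 */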
1558 static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
1559                                               s64 advance_expire_delta)
1560 {
1561         struct kvm_lapic *apic = vcpu->arch.apic;
1562         u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
1563         u64 ns;
1564
1565         /* Do not adjust for tiny fluctuations or large random spikes. */
1566         if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
1567             abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
1568                 return;
1569
1570         /* too early */
1571         if (advance_expire_delta < 0) {
1572                 ns = -advance_expire_delta * 1000000ULL;
1573                 do_div(ns, vcpu->arch.virtual_tsc_khz);
1574                 timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1575         } else {
1576         /* too late */
1577                 ns = advance_expire_delta * 1000000ULL;
1578                 do_div(ns, vcpu->arch.virtual_tsc_khz);
1579                 timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1580         }
1581
1582         if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
1583                 timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
1584         apic->lapic_timer.timer_advance_ns = timer_advance_ns;
1585 }
1586
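/*
 * The timer was programmed to fire timer_advance_ns before the real
 * deadline; now that the interrupt is about to be injected, busy wait
 * until the guest TSC actually reaches expired_tscdeadline so delivery
 * is neither early nor late from the guest's perspective.
 */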
1587 static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1588 {
1589         struct kvm_lapic *apic = vcpu->arch.apic;
1590         u64 guest_tsc, tsc_deadline;
1591
1592         tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1593         apic->lapic_timer.expired_tscdeadline = 0;
1594         guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1595         apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
1596
1597         if (guest_tsc < tsc_deadline)
1598                 __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
1599
1600         if (lapic_timer_advance_dynamic)
1601                 adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
1602 }
1603
1604 void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1605 {
1606         if (lapic_in_kernel(vcpu) &&
1607             vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1608             vcpu->arch.apic->lapic_timer.timer_advance_ns &&
1609             lapic_timer_int_injected(vcpu))
1610                 __kvm_wait_lapic_expire(vcpu);
1611 }
1612 EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
1613
1614 static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
1615 {
1616         struct kvm_timer *ktimer = &apic->lapic_timer;
1617
1618         kvm_apic_local_deliver(apic, APIC_LVTT);
1619         if (apic_lvtt_tscdeadline(apic)) {
1620                 ktimer->tscdeadline = 0;
1621         } else if (apic_lvtt_oneshot(apic)) {
1622                 ktimer->tscdeadline = 0;
1623                 ktimer->target_expiration = 0;
1624         }
1625 }
1626
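/*
 * Three delivery paths: inject immediately when called from the vCPU's
 * own context with APICv active, inject via posted interrupt after
 * waiting out the advance window, or fall back to marking the timer
 * pending and kicking the vCPU so the interrupt is picked up on the
 * next guest entry.
 */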
1627 static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
1628 {
1629         struct kvm_vcpu *vcpu = apic->vcpu;
1630         struct kvm_timer *ktimer = &apic->lapic_timer;
1631
1632         if (atomic_read(&apic->lapic_timer.pending))
1633                 return;
1634
1635         if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
1636                 ktimer->expired_tscdeadline = ktimer->tscdeadline;
1637
1638         if (!from_timer_fn && vcpu->arch.apicv_active) {
1639                 WARN_ON(kvm_get_running_vcpu() != vcpu);
1640                 kvm_apic_inject_pending_timer_irqs(apic);
1641                 return;
1642         }
1643
1644         if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
1645                 kvm_wait_lapic_expire(vcpu);
1646                 kvm_apic_inject_pending_timer_irqs(apic);
1647                 return;
1648         }
1649
1650         atomic_inc(&apic->lapic_timer.pending);
1651         kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1652         if (from_timer_fn)
1653                 kvm_vcpu_kick(vcpu);
1654 }
1655
1656 static void start_sw_tscdeadline(struct kvm_lapic *apic)
1657 {
1658         struct kvm_timer *ktimer = &apic->lapic_timer;
1659         u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
1660         u64 ns = 0;
1661         ktime_t expire;
1662         struct kvm_vcpu *vcpu = apic->vcpu;
1663         unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
1664         unsigned long flags;
1665         ktime_t now;
1666
1667         if (unlikely(!tscdeadline || !this_tsc_khz))
1668                 return;
1669
1670         local_irq_save(flags);
1671
1672         now = ktime_get();
1673         guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1674
1675         ns = (tscdeadline - guest_tsc) * 1000000ULL;
1676         do_div(ns, this_tsc_khz);
1677
1678         if (likely(tscdeadline > guest_tsc) &&
1679             likely(ns > apic->lapic_timer.timer_advance_ns)) {
1680                 expire = ktime_add_ns(now, ns);
1681                 expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
1682                 hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
1683         } else {
1684                 apic_timer_expired(apic, false);
1685         }
1686         local_irq_restore(flags);
1687 }
1688
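/*
 * KVM models the APIC bus clock as one tick per nanosecond
 * (APIC_BUS_CYCLE_NS is defined as 1 in lapic.h), so e.g. TMICT ==
 * 1,000,000 with divide_count == 16 yields a 16,000,000 ns (16 ms)
 * period.
 */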
1689 static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
1690 {
1691         return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count;
1692 }
1693
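/*
 * Called when the guest changes TDCR while the timer is armed: the time
 * remaining is rescaled by divide_count / old_divisor, since each
 * not-yet-elapsed timer tick now takes proportionally longer or
 * shorter.  E.g. 8 ms remaining with old_divisor == 2 becomes 16 ms
 * once divide_count is 4.
 */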
1694 static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1695 {
1696         ktime_t now, remaining;
1697         u64 ns_remaining_old, ns_remaining_new;
1698
1699         apic->lapic_timer.period =
1700                         tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1701         limit_periodic_timer_frequency(apic);
1702
1703         now = ktime_get();
1704         remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1705         if (ktime_to_ns(remaining) < 0)
1706                 remaining = 0;
1707
1708         ns_remaining_old = ktime_to_ns(remaining);
1709         ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
1710                                            apic->divide_count, old_divisor);
1711
1712         apic->lapic_timer.tscdeadline +=
1713                 nsec_to_cycles(apic->vcpu, ns_remaining_new) -
1714                 nsec_to_cycles(apic->vcpu, ns_remaining_old);
1715         apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
1716 }
1717
1718 static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
1719 {
1720         ktime_t now;
1721         u64 tscl = rdtsc();
1722         s64 deadline;
1723
1724         now = ktime_get();
1725         apic->lapic_timer.period =
1726                         tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1727
1728         if (!apic->lapic_timer.period) {
1729                 apic->lapic_timer.tscdeadline = 0;
1730                 return false;
1731         }
1732
1733         limit_periodic_timer_frequency(apic);
1734         deadline = apic->lapic_timer.period;
1735
1736         if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
1737                 if (unlikely(count_reg != APIC_TMICT)) {
1738                         deadline = tmict_to_ns(apic,
1739                                      kvm_lapic_get_reg(apic, count_reg));
1740                         if (unlikely(deadline <= 0))
1741                                 deadline = apic->lapic_timer.period;
1742                         else if (unlikely(deadline > apic->lapic_timer.period)) {
1743                                 pr_info_ratelimited(
1744                                     "kvm: vcpu %i: requested lapic timer restore with "
1745                                     "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
1746                                     "Using initial count to start timer.\n",
1747                                     apic->vcpu->vcpu_id,
1748                                     count_reg,
1749                                     kvm_lapic_get_reg(apic, count_reg),
1750                                     deadline, apic->lapic_timer.period);
1751                                 kvm_lapic_set_reg(apic, count_reg, 0);
1752                                 deadline = apic->lapic_timer.period;
1753                         }
1754                 }
1755         }
1756
1757         apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1758                 nsec_to_cycles(apic->vcpu, deadline);
1759         apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
1760
1761         return true;
1762 }
1763
1764 static void advance_periodic_target_expiration(struct kvm_lapic *apic)
1765 {
1766         ktime_t now = ktime_get();
1767         u64 tscl = rdtsc();
1768         ktime_t delta;
1769
1770         /*
1771          * Synchronize both deadlines to the same time source or
1772          * differences in the periods (caused by differences in the
1773          * underlying clocks or numerical approximation errors) will
1774          * cause the two to drift apart over time as the errors
1775          * accumulate.
1776          */
1777         apic->lapic_timer.target_expiration =
1778                 ktime_add_ns(apic->lapic_timer.target_expiration,
1779                                 apic->lapic_timer.period);
1780         delta = ktime_sub(apic->lapic_timer.target_expiration, now);
1781         apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
1782                 nsec_to_cycles(apic->vcpu, delta);
1783 }
1784
1785 static void start_sw_period(struct kvm_lapic *apic)
1786 {
1787         if (!apic->lapic_timer.period)
1788                 return;
1789
1790         if (ktime_after(ktime_get(),
1791                         apic->lapic_timer.target_expiration)) {
1792                 apic_timer_expired(apic, false);
1793
1794                 if (apic_lvtt_oneshot(apic))
1795                         return;
1796
1797                 advance_periodic_target_expiration(apic);
1798         }
1799
1800         hrtimer_start(&apic->lapic_timer.timer,
1801                 apic->lapic_timer.target_expiration,
1802                 HRTIMER_MODE_ABS_HARD);
1803 }
1804
1805 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
1806 {
1807         if (!lapic_in_kernel(vcpu))
1808                 return false;
1809
1810         return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
1811 }
1812 EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
1813
1814 static void cancel_hv_timer(struct kvm_lapic *apic)
1815 {
1816         WARN_ON(preemptible());
1817         WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1818         static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
1819         apic->lapic_timer.hv_timer_in_use = false;
1820 }
1821
1822 static bool start_hv_timer(struct kvm_lapic *apic)
1823 {
1824         struct kvm_timer *ktimer = &apic->lapic_timer;
1825         struct kvm_vcpu *vcpu = apic->vcpu;
1826         bool expired;
1827
1828         WARN_ON(preemptible());
1829         if (!kvm_can_use_hv_timer(vcpu))
1830                 return false;
1831
1832         if (!ktimer->tscdeadline)
1833                 return false;
1834
1835         if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
1836                 return false;
1837
1838         ktimer->hv_timer_in_use = true;
1839         hrtimer_cancel(&ktimer->timer);
1840
1841         /*
1842          * To simplify handling the periodic timer, leave the hv timer running
1843          * even if the deadline timer has expired, i.e. rely on the resulting
1844          * VM-Exit to recompute the periodic timer's target expiration.
1845          */
1846         if (!apic_lvtt_period(apic)) {
1847                 /*
1848                  * Cancel the hv timer if the sw timer fired while the hv timer
1849                  * was being programmed, or if the hv timer itself expired.
1850                  */
1851                 if (atomic_read(&ktimer->pending)) {
1852                         cancel_hv_timer(apic);
1853                 } else if (expired) {
1854                         apic_timer_expired(apic, false);
1855                         cancel_hv_timer(apic);
1856                 }
1857         }
1858
1859         trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
1860
1861         return true;
1862 }
1863
1864 static void start_sw_timer(struct kvm_lapic *apic)
1865 {
1866         struct kvm_timer *ktimer = &apic->lapic_timer;
1867
1868         WARN_ON(preemptible());
1869         if (apic->lapic_timer.hv_timer_in_use)
1870                 cancel_hv_timer(apic);
1871         if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
1872                 return;
1873
1874         if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
1875                 start_sw_period(apic);
1876         else if (apic_lvtt_tscdeadline(apic))
1877                 start_sw_tscdeadline(apic);
1878         trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
1879 }
1880
1881 static void restart_apic_timer(struct kvm_lapic *apic)
1882 {
1883         preempt_disable();
1884
1885         if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
1886                 goto out;
1887
1888         if (!start_hv_timer(apic))
1889                 start_sw_timer(apic);
1890 out:
1891         preempt_enable();
1892 }
1893
1894 void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
1895 {
1896         struct kvm_lapic *apic = vcpu->arch.apic;
1897
1898         preempt_disable();
1899         /* If the preempt notifier has already run, it also called apic_timer_expired */
1900         if (!apic->lapic_timer.hv_timer_in_use)
1901                 goto out;
1902         WARN_ON(rcuwait_active(&vcpu->wait));
1903         cancel_hv_timer(apic);
1904         apic_timer_expired(apic, false);
1905
1906         if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1907                 advance_periodic_target_expiration(apic);
1908                 restart_apic_timer(apic);
1909         }
1910 out:
1911         preempt_enable();
1912 }
1913 EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
1914
1915 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
1916 {
1917         restart_apic_timer(vcpu->arch.apic);
1918 }
1919 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
1920
1921 void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
1922 {
1923         struct kvm_lapic *apic = vcpu->arch.apic;
1924
1925         preempt_disable();
1926         /* Possibly the TSC deadline timer is not enabled yet */
1927         if (apic->lapic_timer.hv_timer_in_use)
1928                 start_sw_timer(apic);
1929         preempt_enable();
1930 }
1931 EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
1932
1933 void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
1934 {
1935         struct kvm_lapic *apic = vcpu->arch.apic;
1936
1937         WARN_ON(!apic->lapic_timer.hv_timer_in_use);
1938         restart_apic_timer(apic);
1939 }
1940
1941 static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
1942 {
1943         atomic_set(&apic->lapic_timer.pending, 0);
1944
1945         if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
1946             && !set_target_expiration(apic, count_reg))
1947                 return;
1948
1949         restart_apic_timer(apic);
1950 }
1951
1952 static void start_apic_timer(struct kvm_lapic *apic)
1953 {
1954         __start_apic_timer(apic, APIC_TMICT);
1955 }
1956
1957 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
1958 {
1959         bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
1960
1961         if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
1962                 apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
1963                 if (lvt0_in_nmi_mode)
1964                         atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1965                 else
1966                         atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
1967         }
1968 }
1969
1970 int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
1971 {
1972         int ret = 0;
1973
1974         trace_kvm_apic_write(reg, val);
1975
1976         switch (reg) {
1977         case APIC_ID:           /* Local APIC ID */
1978                 if (!apic_x2apic_mode(apic))
1979                         kvm_apic_set_xapic_id(apic, val >> 24);
1980                 else
1981                         ret = 1;
1982                 break;
1983
1984         case APIC_TASKPRI:
1985                 report_tpr_access(apic, true);
1986                 apic_set_tpr(apic, val & 0xff);
1987                 break;
1988
1989         case APIC_EOI:
1990                 apic_set_eoi(apic);
1991                 break;
1992
1993         case APIC_LDR:
1994                 if (!apic_x2apic_mode(apic))
1995                         kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
1996                 else
1997                         ret = 1;
1998                 break;
1999
2000         case APIC_DFR:
2001                 if (!apic_x2apic_mode(apic))
2002                         kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
2003                 else
2004                         ret = 1;
2005                 break;
2006
2007         case APIC_SPIV: {
2008                 u32 mask = 0x3ff;
2009                 if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
2010                         mask |= APIC_SPIV_DIRECTED_EOI;
2011                 apic_set_spiv(apic, val & mask);
2012                 if (!(val & APIC_SPIV_APIC_ENABLED)) {
2013                         int i;
2014                         u32 lvt_val;
2015
2016                         for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
2017                                 lvt_val = kvm_lapic_get_reg(apic,
2018                                                        APIC_LVTT + 0x10 * i);
2019                                 kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
2020                                              lvt_val | APIC_LVT_MASKED);
2021                         }
2022                         apic_update_lvtt(apic);
2023                         atomic_set(&apic->lapic_timer.pending, 0);
2024
2025                 }
2026                 break;
2027         }
2028         case APIC_ICR:
2029                 /* No delay here, so the delivery status (busy) bit is always clear */
2030                 val &= ~APIC_ICR_BUSY;
2031                 kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
2032                 kvm_lapic_set_reg(apic, APIC_ICR, val);
2033                 break;
2034
2035         case APIC_ICR2:
2036                 if (!apic_x2apic_mode(apic))
2037                         val &= 0xff000000;
2038                 kvm_lapic_set_reg(apic, APIC_ICR2, val);
2039                 break;
2040
2041         case APIC_LVT0:
2042                 apic_manage_nmi_watchdog(apic, val);
2043                 fallthrough;
2044         case APIC_LVTTHMR:
2045         case APIC_LVTPC:
2046         case APIC_LVT1:
2047         case APIC_LVTERR: {
2048                 /* TODO: Check vector */
2049                 size_t size;
2050                 u32 index;
2051
2052                 if (!kvm_apic_sw_enabled(apic))
2053                         val |= APIC_LVT_MASKED;
2054                 size = ARRAY_SIZE(apic_lvt_mask);
2055                 index = array_index_nospec(
2056                                 (reg - APIC_LVTT) >> 4, size);
2057                 val &= apic_lvt_mask[index];
2058                 kvm_lapic_set_reg(apic, reg, val);
2059                 break;
2060         }
2061
2062         case APIC_LVTT:
2063                 if (!kvm_apic_sw_enabled(apic))
2064                         val |= APIC_LVT_MASKED;
2065                 val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
2066                 kvm_lapic_set_reg(apic, APIC_LVTT, val);
2067                 apic_update_lvtt(apic);
2068                 break;
2069
2070         case APIC_TMICT:
2071                 if (apic_lvtt_tscdeadline(apic))
2072                         break;
2073
2074                 hrtimer_cancel(&apic->lapic_timer.timer);
2075                 kvm_lapic_set_reg(apic, APIC_TMICT, val);
2076                 start_apic_timer(apic);
2077                 break;
2078
2079         case APIC_TDCR: {
2080                 uint32_t old_divisor = apic->divide_count;
2081
2082                 kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
2083                 update_divide_count(apic);
2084                 if (apic->divide_count != old_divisor &&
2085                                 apic->lapic_timer.period) {
2086                         hrtimer_cancel(&apic->lapic_timer.timer);
2087                         update_target_expiration(apic, old_divisor);
2088                         restart_apic_timer(apic);
2089                 }
2090                 break;
2091         }
2092         case APIC_ESR:
2093                 if (apic_x2apic_mode(apic) && val != 0)
2094                         ret = 1;
2095                 break;
2096
2097         case APIC_SELF_IPI:
2098                 if (apic_x2apic_mode(apic))
2099                         kvm_lapic_reg_write(apic, APIC_ICR,
2100                                             APIC_DEST_SELF | (val & APIC_VECTOR_MASK));
2101                 else
2102                         ret = 1;
2103                 break;
2104         default:
2105                 ret = 1;
2106                 break;
2107         }
2108
2109         kvm_recalculate_apic_map(apic->vcpu->kvm);
2110
2111         return ret;
2112 }
2113 EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
2114
2115 static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
2116                             gpa_t address, int len, const void *data)
2117 {
2118         struct kvm_lapic *apic = to_lapic(this);
2119         unsigned int offset = address - apic->base_address;
2120         u32 val;
2121
2122         if (!apic_mmio_in_range(apic, address))
2123                 return -EOPNOTSUPP;
2124
2125         if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
2126                 if (!kvm_check_has_quirk(vcpu->kvm,
2127                                          KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
2128                         return -EOPNOTSUPP;
2129
2130                 return 0;
2131         }
2132
2133         /*
2134          * APIC registers must be aligned on a 128-bit boundary.
2135          * 32/64/128-bit registers must be accessed through 32-bit accesses.
2136          * Refer to SDM 8.4.1.
2137          */
2138         if (len != 4 || (offset & 0xf))
2139                 return 0;
2140
2141         val = *(u32 *)data;
2142
2143         kvm_lapic_reg_write(apic, offset & 0xff0, val);
2144
2145         return 0;
2146 }
2147
2148 void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
2149 {
2150         kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
2151 }
2152 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
2153
2154 /* emulate an APIC write that was handled as a trap-style VM-exit */
2155 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
2156 {
2157         u32 val = 0;
2158
2159         /* hardware has already done the conditional check and instruction decode */
2160         offset &= 0xff0;
2161
2162         kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);
2163
2164         /* TODO: optimize to just emulate side effect w/o one more write */
2165         kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
2166 }
2167 EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
2168
2169 void kvm_free_lapic(struct kvm_vcpu *vcpu)
2170 {
2171         struct kvm_lapic *apic = vcpu->arch.apic;
2172
2173         if (!vcpu->arch.apic)
2174                 return;
2175
2176         hrtimer_cancel(&apic->lapic_timer.timer);
2177
2178         if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
2179                 static_branch_slow_dec_deferred(&apic_hw_disabled);
2180
2181         if (!apic->sw_enabled)
2182                 static_branch_slow_dec_deferred(&apic_sw_disabled);
2183
2184         if (apic->regs)
2185                 free_page((unsigned long)apic->regs);
2186
2187         kfree(apic);
2188 }
2189
2190 /*
2191  *----------------------------------------------------------------------
2192  * LAPIC interface
2193  *----------------------------------------------------------------------
2194  */
2195 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
2196 {
2197         struct kvm_lapic *apic = vcpu->arch.apic;
2198
2199         if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2200                 return 0;
2201
2202         return apic->lapic_timer.tscdeadline;
2203 }
2204
2205 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
2206 {
2207         struct kvm_lapic *apic = vcpu->arch.apic;
2208
2209         if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2210                 return;
2211
2212         hrtimer_cancel(&apic->lapic_timer.timer);
2213         apic->lapic_timer.tscdeadline = data;
2214         start_apic_timer(apic);
2215 }
2216
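/*
 * CR8 is the task-priority class, i.e. TPR bits 7:4: writing CR8 == 5
 * yields TPR == 0x50 (bit 2 of the old TPR is preserved here), and
 * reading CR8 simply returns TPR >> 4.
 */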
2217 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
2218 {
2219         struct kvm_lapic *apic = vcpu->arch.apic;
2220
2221         apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
2222                      | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
2223 }
2224
2225 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
2226 {
2227         u64 tpr;
2228
2229         tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
2230
2231         return (tpr & 0xf0) >> 4;
2232 }
2233
2234 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
2235 {
2236         u64 old_value = vcpu->arch.apic_base;
2237         struct kvm_lapic *apic = vcpu->arch.apic;
2238
2239         if (!apic)
2240                 value |= MSR_IA32_APICBASE_BSP;
2241
2242         vcpu->arch.apic_base = value;
2243
2244         if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
2245                 kvm_update_cpuid_runtime(vcpu);
2246
2247         if (!apic)
2248                 return;
2249
2250         /* update jump label if enable bit changes */
2251         if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
2252                 if (value & MSR_IA32_APICBASE_ENABLE) {
2253                         kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2254                         static_branch_slow_dec_deferred(&apic_hw_disabled);
2255                 } else {
2256                         static_branch_inc(&apic_hw_disabled.key);
2257                         atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2258                 }
2259         }
2260
2261         if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
2262                 kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
2263
2264         if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
2265                 static_call(kvm_x86_set_virtual_apic_mode)(vcpu);
2266
2267         apic->base_address = apic->vcpu->arch.apic_base &
2268                              MSR_IA32_APICBASE_BASE;
2269
2270         if ((value & MSR_IA32_APICBASE_ENABLE) &&
2271              apic->base_address != APIC_DEFAULT_PHYS_BASE)
2272                 pr_warn_once("APIC base relocation is unsupported by KVM");
2273 }
2274
2275 void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
2276 {
2277         struct kvm_lapic *apic = vcpu->arch.apic;
2278
2279         if (vcpu->arch.apicv_active) {
2280                 /* irr_pending is always true when apicv is activated. */
2281                 apic->irr_pending = true;
2282                 apic->isr_count = 1;
2283         } else {
2284                 apic->irr_pending = (apic_search_irr(apic) != -1);
2285                 apic->isr_count = count_vectors(apic->regs + APIC_ISR);
2286         }
2287 }
2288 EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
2289
2290 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
2291 {
2292         struct kvm_lapic *apic = vcpu->arch.apic;
2293         int i;
2294
2295         if (!apic)
2296                 return;
2297
2298         /* Stop the timer in case it's a reset to an active apic */
2299         hrtimer_cancel(&apic->lapic_timer.timer);
2300
2301         if (!init_event) {
2302                 kvm_lapic_set_base(vcpu, APIC_DEFAULT_PHYS_BASE |
2303                                          MSR_IA32_APICBASE_ENABLE);
2304                 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2305         }
2306         kvm_apic_set_version(apic->vcpu);
2307
2308         for (i = 0; i < KVM_APIC_LVT_NUM; i++)
2309                 kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
2310         apic_update_lvtt(apic);
2311         if (kvm_vcpu_is_reset_bsp(vcpu) &&
2312             kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
2313                 kvm_lapic_set_reg(apic, APIC_LVT0,
2314                              SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
2315         apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2316
2317         kvm_apic_set_dfr(apic, 0xffffffffU);
2318         apic_set_spiv(apic, 0xff);
2319         kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
2320         if (!apic_x2apic_mode(apic))
2321                 kvm_apic_set_ldr(apic, 0);
2322         kvm_lapic_set_reg(apic, APIC_ESR, 0);
2323         kvm_lapic_set_reg(apic, APIC_ICR, 0);
2324         kvm_lapic_set_reg(apic, APIC_ICR2, 0);
2325         kvm_lapic_set_reg(apic, APIC_TDCR, 0);
2326         kvm_lapic_set_reg(apic, APIC_TMICT, 0);
2327         for (i = 0; i < 8; i++) {
2328                 kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
2329                 kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
2330                 kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
2331         }
2332         kvm_apic_update_apicv(vcpu);
2333         apic->highest_isr_cache = -1;
2334         update_divide_count(apic);
2335         atomic_set(&apic->lapic_timer.pending, 0);
2336         if (kvm_vcpu_is_bsp(vcpu))
2337                 kvm_lapic_set_base(vcpu,
2338                                 vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
2339         vcpu->arch.pv_eoi.msr_val = 0;
2340         apic_update_ppr(apic);
2341         if (vcpu->arch.apicv_active) {
2342                 static_call(kvm_x86_apicv_post_state_restore)(vcpu);
2343                 static_call(kvm_x86_hwapic_irr_update)(vcpu, -1);
2344                 static_call(kvm_x86_hwapic_isr_update)(vcpu, -1);
2345         }
2346
2347         vcpu->arch.apic_arb_prio = 0;
2348         vcpu->arch.apic_attention = 0;
2349
2350         kvm_recalculate_apic_map(vcpu->kvm);
2351 }
2352
2353 /*
2354  *----------------------------------------------------------------------
2355  * timer interface
2356  *----------------------------------------------------------------------
2357  */
2358
2359 static bool lapic_is_periodic(struct kvm_lapic *apic)
2360 {
2361         return apic_lvtt_period(apic);
2362 }
2363
2364 int apic_has_pending_timer(struct kvm_vcpu *vcpu)
2365 {
2366         struct kvm_lapic *apic = vcpu->arch.apic;
2367
2368         if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
2369                 return atomic_read(&apic->lapic_timer.pending);
2370
2371         return 0;
2372 }
2373
2374 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
2375 {
2376         u32 reg = kvm_lapic_get_reg(apic, lvt_type);
2377         int vector, mode, trig_mode;
2378
2379         if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
2380                 vector = reg & APIC_VECTOR_MASK;
2381                 mode = reg & APIC_MODE_MASK;
2382                 trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
2383                 return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
2384                                         NULL);
2385         }
2386         return 0;
2387 }
2388
2389 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
2390 {
2391         struct kvm_lapic *apic = vcpu->arch.apic;
2392
2393         if (apic)
2394                 kvm_apic_local_deliver(apic, APIC_LVT0);
2395 }
2396
2397 static const struct kvm_io_device_ops apic_mmio_ops = {
2398         .read     = apic_mmio_read,
2399         .write    = apic_mmio_write,
2400 };
2401
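/*
 * hrtimer callback: a periodic timer re-arms itself by pushing its own
 * expiry forward one period and returning HRTIMER_RESTART, so the
 * hrtimer core keeps it running without another hrtimer_start().
 */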
2402 static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
2403 {
2404         struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
2405         struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
2406
2407         apic_timer_expired(apic, true);
2408
2409         if (lapic_is_periodic(apic)) {
2410                 advance_periodic_target_expiration(apic);
2411                 hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
2412                 return HRTIMER_RESTART;
2413         }
2414         return HRTIMER_NORESTART;
2415 }
2416
2417 int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
2418 {
2419         struct kvm_lapic *apic;
2420
2421         ASSERT(vcpu != NULL);
2422
2423         apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
2424         if (!apic)
2425                 goto nomem;
2426
2427         vcpu->arch.apic = apic;
2428
2429         apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
2430         if (!apic->regs) {
2431                 printk(KERN_ERR "failed to allocate APIC register page for vcpu %x\n",
2432                        vcpu->vcpu_id);
2433                 goto nomem_free_apic;
2434         }
2435         apic->vcpu = vcpu;
2436
2437         hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
2438                      HRTIMER_MODE_ABS_HARD);
2439         apic->lapic_timer.timer.function = apic_timer_fn;
2440         if (timer_advance_ns == -1) {
2441                 apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
2442                 lapic_timer_advance_dynamic = true;
2443         } else {
2444                 apic->lapic_timer.timer_advance_ns = timer_advance_ns;
2445                 lapic_timer_advance_dynamic = false;
2446         }
2447
2448         /*
2449          * APIC is created enabled. This will prevent kvm_lapic_set_base from
2450          * thinking that APIC state has changed.
2451          */
2452         vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
2453         static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
2454         kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
2455
2456         return 0;
2457 nomem_free_apic:
2458         kfree(apic);
2459         vcpu->arch.apic = NULL;
2460 nomem:
2461         return -ENOMEM;
2462 }
2463
2464 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
2465 {
2466         struct kvm_lapic *apic = vcpu->arch.apic;
2467         u32 ppr;
2468
2469         if (!kvm_apic_present(vcpu))
2470                 return -1;
2471
2472         __apic_update_ppr(apic, &ppr);
2473         return apic_has_interrupt_for_ppr(apic, ppr);
2474 }
2475 EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
2476
2477 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
2478 {
2479         u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
2480
2481         if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2482                 return 1;
2483         if ((lvt0 & APIC_LVT_MASKED) == 0 &&
2484             GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
2485                 return 1;
2486         return 0;
2487 }
2488
2489 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
2490 {
2491         struct kvm_lapic *apic = vcpu->arch.apic;
2492
2493         if (atomic_read(&apic->lapic_timer.pending) > 0) {
2494                 kvm_apic_inject_pending_timer_irqs(apic);
2495                 atomic_set(&apic->lapic_timer.pending, 0);
2496         }
2497 }
2498
2499 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
2500 {
2501         int vector = kvm_apic_has_interrupt(vcpu);
2502         struct kvm_lapic *apic = vcpu->arch.apic;
2503         u32 ppr;
2504
2505         if (vector == -1)
2506                 return -1;
2507
2508         /*
2509          * We get here even with APIC virtualization enabled, if doing
2510          * nested virtualization and L1 runs with the "acknowledge interrupt
2511          * on exit" mode.  Then we cannot inject the interrupt via RVI,
2512          * because the process would deliver it through the IDT.
2513          */
2514
2515         apic_clear_irr(vector, apic);
2516         if (to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
2517                 /*
2518                  * For auto-EOI interrupts, there might be another pending
2519                  * interrupt above PPR, so check whether to raise another
2520                  * KVM_REQ_EVENT.
2521                  */
2522                 apic_update_ppr(apic);
2523         } else {
2524                 /*
2525                  * For normal interrupts, PPR has been raised and there cannot
2526                  * be a higher-priority pending interrupt---except if there was
2527                  * a concurrent interrupt injection, but that would have
2528                  * triggered KVM_REQ_EVENT already.
2529                  */
2530                 apic_set_isr(vector, apic);
2531                 __apic_update_ppr(apic, &ppr);
2532         }
2533
2534         return vector;
2535 }
2536
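/*
 * kvm_lapic_state always uses the xAPIC register layout.  In xAPIC
 * format the 8-bit APIC ID lives in bits 31:24 of the ID register,
 * while x2APIC exposes the full 32-bit ID, hence the <<24/>>24 shuffle
 * below whenever userspace has not opted in to the 32-bit-ID format
 * (x2apic_format).
 */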
2537 static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
2538                 struct kvm_lapic_state *s, bool set)
2539 {
2540         if (apic_x2apic_mode(vcpu->arch.apic)) {
2541                 u32 *id = (u32 *)(s->regs + APIC_ID);
2542                 u32 *ldr = (u32 *)(s->regs + APIC_LDR);
2543
2544                 if (vcpu->kvm->arch.x2apic_format) {
2545                         if (*id != vcpu->vcpu_id)
2546                                 return -EINVAL;
2547                 } else {
2548                         if (set)
2549                                 *id >>= 24;
2550                         else
2551                                 *id <<= 24;
2552                 }
2553
2554                 /* In x2APIC mode, the LDR is fixed and based on the id */
2555                 if (set)
2556                         *ldr = kvm_apic_calc_x2apic_ldr(*id);
2557         }
2558
2559         return 0;
2560 }
2561
2562 int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
2563 {
2564         memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
2565
2566         /*
2567          * Get calculated timer current count for remaining timer period (if
2568          * any) and store it in the returned register set.
2569          */
2570         __kvm_lapic_set_reg(s->regs, APIC_TMCCT,
2571                             __apic_read(vcpu->arch.apic, APIC_TMCCT));
2572
2573         return kvm_apic_state_fixup(vcpu, s, false);
2574 }
2575
2576 int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
2577 {
2578         struct kvm_lapic *apic = vcpu->arch.apic;
2579         int r;
2580
2581         kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
2582         /* set SPIV separately to get count of SW disabled APICs right */
2583         apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
2584
2585         r = kvm_apic_state_fixup(vcpu, s, true);
2586         if (r) {
2587                 kvm_recalculate_apic_map(vcpu->kvm);
2588                 return r;
2589         }
2590         memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
2591
2592         atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2593         kvm_recalculate_apic_map(vcpu->kvm);
2594         kvm_apic_set_version(vcpu);
2595
2596         apic_update_ppr(apic);
2597         hrtimer_cancel(&apic->lapic_timer.timer);
2598         apic_update_lvtt(apic);
2599         apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2600         update_divide_count(apic);
2601         __start_apic_timer(apic, APIC_TMCCT);
2602         kvm_apic_update_apicv(vcpu);
2603         apic->highest_isr_cache = -1;
2604         if (vcpu->arch.apicv_active) {
2605                 static_call(kvm_x86_apicv_post_state_restore)(vcpu);
2606                 static_call(kvm_x86_hwapic_irr_update)(vcpu,
2607                                 apic_find_highest_irr(apic));
2608                 static_call(kvm_x86_hwapic_isr_update)(vcpu,
2609                                 apic_find_highest_isr(apic));
2610         }
2611         kvm_make_request(KVM_REQ_EVENT, vcpu);
2612         if (ioapic_in_kernel(vcpu->kvm))
2613                 kvm_rtc_eoi_tracking_restore_one(vcpu);
2614
2615         vcpu->arch.apic_arb_prio = 0;
2616
2617         return 0;
2618 }
2619
2620 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
2621 {
2622         struct hrtimer *timer;
2623
2624         if (!lapic_in_kernel(vcpu) ||
2625                 kvm_can_post_timer_interrupt(vcpu))
2626                 return;
2627
2628         timer = &vcpu->arch.apic->lapic_timer.timer;
2629         if (hrtimer_cancel(timer))
2630                 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
2631 }
2632
2633 /*
2634  * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
2635  *
2636  * Detect whether the guest triggered PV EOI since the
2637  * last entry. If yes, perform the EOI on the guest's behalf.
2638  * Clear PV EOI in guest memory in any case.
2639  */
2640 static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
2641                                         struct kvm_lapic *apic)
2642 {
2643         bool pending;
2644         int vector;
2645         /*
2646          * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
2647          * and KVM_PV_EOI_ENABLED in guest memory as follows:
2648          *
2649          * KVM_APIC_PV_EOI_PENDING is unset:
2650          *      -> host disabled PV EOI.
2651          * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
2652          *      -> host enabled PV EOI, guest did not execute EOI yet.
2653          * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
2654          *      -> host enabled PV EOI, guest executed EOI.
2655          */
2656         BUG_ON(!pv_eoi_enabled(vcpu));
2657         pending = pv_eoi_get_pending(vcpu);
2658         /*
2659          * Clear pending bit in any case: it will be set again on vmentry.
2660          * While this might not be ideal from performance point of view,
2661          * this makes sure pv eoi is only enabled when we know it's safe.
2662          */
2663         pv_eoi_clr_pending(vcpu);
2664         if (pending)
2665                 return;
2666         vector = apic_set_eoi(apic);
2667         trace_kvm_pv_eoi(apic, vector);
2668 }
2669
2670 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
2671 {
2672         u32 data;
2673
2674         if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
2675                 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
2676
2677         if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2678                 return;
2679
2680         if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2681                                   sizeof(u32)))
2682                 return;
2683
2684         apic_set_tpr(vcpu->arch.apic, data & 0xff);
2685 }
2686
2687 /*
2688  * apic_sync_pv_eoi_to_guest - called before vmentry
2689  *
2690  * Detect whether it's safe to enable PV EOI and
2691  * if yes do so.
2692  */
2693 static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
2694                                         struct kvm_lapic *apic)
2695 {
2696         if (!pv_eoi_enabled(vcpu) ||
2697             /* IRR set or many bits in ISR: could be nested. */
2698             apic->irr_pending ||
2699             /* Cache not set: could be safe but we don't bother. */
2700             apic->highest_isr_cache == -1 ||
2701             /* Need EOI to update ioapic. */
2702             kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
2703                 /*
2704                  * PV EOI was disabled by apic_sync_pv_eoi_from_guest
2705                  * so we need not do anything here.
2706                  */
2707                 return;
2708         }
2709
2710         pv_eoi_set_pending(apic->vcpu);
2711 }
2712
2713 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
2714 {
2715         u32 data, tpr;
2716         int max_irr, max_isr;
2717         struct kvm_lapic *apic = vcpu->arch.apic;
2718
2719         apic_sync_pv_eoi_to_guest(vcpu, apic);
2720
2721         if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
2722                 return;
2723
2724         tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
2725         max_irr = apic_find_highest_irr(apic);
2726         if (max_irr < 0)
2727                 max_irr = 0;
2728         max_isr = apic_find_highest_isr(apic);
2729         if (max_isr < 0)
2730                 max_isr = 0;
2731         data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
2732
2733         kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
2734                                 sizeof(u32));
2735 }
2736
2737 int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
2738 {
2739         if (vapic_addr) {
2740                 if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
2741                                         &vcpu->arch.apic->vapic_cache,
2742                                         vapic_addr, sizeof(u32)))
2743                         return -EINVAL;
2744                 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2745         } else {
2746                 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
2747         }
2748
2749         vcpu->arch.apic->vapic_addr = vapic_addr;
2750         return 0;
2751 }
2752
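/*
 * x2APIC MSRs map linearly onto the xAPIC MMIO layout: register offset
 * = (msr - APIC_BASE_MSR) << 4, e.g. MSR 0x808 -> APIC_TASKPRI (0x80)
 * and MSR 0x830 -> APIC_ICR (0x300).  The 64-bit ICR is the exception
 * and is split across APIC_ICR2 (high half) and APIC_ICR (low half)
 * below.
 */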
2753 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
2754 {
2755         struct kvm_lapic *apic = vcpu->arch.apic;
2756         u32 reg = (msr - APIC_BASE_MSR) << 4;
2757
2758         if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2759                 return 1;
2760
2761         if (reg == APIC_ICR2)
2762                 return 1;
2763
2764         /* for ICR, write the high half (ICR2) first; writing ICR triggers the IPI */
2765         if (reg == APIC_ICR)
2766                 kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2767         return kvm_lapic_reg_write(apic, reg, (u32)data);
2768 }
2769
2770 int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
2771 {
2772         struct kvm_lapic *apic = vcpu->arch.apic;
2773         u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
2774
2775         if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
2776                 return 1;
2777
2778         if (reg == APIC_DFR || reg == APIC_ICR2)
2779                 return 1;
2780
2781         if (kvm_lapic_reg_read(apic, reg, 4, &low))
2782                 return 1;
2783         if (reg == APIC_ICR)
2784                 kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
2785
2786         *data = (((u64)high) << 32) | low;
2787
2788         return 0;
2789 }
2790
2791 int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
2792 {
2793         struct kvm_lapic *apic = vcpu->arch.apic;
2794
2795         if (!lapic_in_kernel(vcpu))
2796                 return 1;
2797
2798         /* for ICR, write the high half (ICR2) first; writing ICR triggers the IPI */
2799         if (reg == APIC_ICR)
2800                 kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
2801         return kvm_lapic_reg_write(apic, reg, (u32)data);
2802 }
2803
2804 int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
2805 {
2806         struct kvm_lapic *apic = vcpu->arch.apic;
2807         u32 low, high = 0;
2808
2809         if (!lapic_in_kernel(vcpu))
2810                 return 1;
2811
2812         if (kvm_lapic_reg_read(apic, reg, 4, &low))
2813                 return 1;
2814         if (reg == APIC_ICR)
2815                 kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);
2816
2817         *data = (((u64)high) << 32) | low;
2818
2819         return 0;
2820 }
2821
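/*
 * data is the raw MSR_KVM_PV_EOI_EN value written by the guest: bit 0
 * (KVM_MSR_ENABLED) switches the feature on, and the remaining bits
 * carry the 4-byte-aligned guest physical address of the shared PV EOI
 * flag word.
 */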
2822 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
2823 {
2824         u64 addr = data & ~KVM_MSR_ENABLED;
2825         struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
2826         unsigned long new_len;
2827
2828         if (!IS_ALIGNED(addr, 4))
2829                 return 1;
2830
2831         vcpu->arch.pv_eoi.msr_val = data;
2832         if (!pv_eoi_enabled(vcpu))
2833                 return 0;
2834
2835         if (addr == ghc->gpa && len <= ghc->len)
2836                 new_len = ghc->len;
2837         else
2838                 new_len = len;
2839
2840         return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
2841 }
2842
2843 void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
2844 {
2845         struct kvm_lapic *apic = vcpu->arch.apic;
2846         u8 sipi_vector;
2847         int r;
2848         unsigned long pe;
2849
2850         if (!lapic_in_kernel(vcpu))
2851                 return;
2852
2853         /*
2854          * Read pending events before calling the check_events
2855          * callback.
2856          */
2857         pe = smp_load_acquire(&apic->pending_events);
2858         if (!pe)
2859                 return;
2860
2861         if (is_guest_mode(vcpu)) {
2862                 r = kvm_x86_ops.nested_ops->check_events(vcpu);
2863                 if (r < 0)
2864                         return;
2865                 /*
2866                  * If an event has happened and caused a vmexit,
2867                  * we know INITs are latched and therefore
2868                  * we will not incorrectly deliver an APIC
2869                  * event instead of a vmexit.
2870                  */
2871         }
2872
2873         /*
2874          * INITs are latched while CPU is in specific states
2875          * (SMM, VMX root mode, SVM with GIF=0).
2876          * Because a CPU cannot be in these states immediately
2877          * after it has processed an INIT signal (and thus in
2878          * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
2879          * and leave the INIT pending.
2880          */
2881         if (kvm_vcpu_latch_init(vcpu)) {
2882                 WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
2883                 if (test_bit(KVM_APIC_SIPI, &pe))
2884                         clear_bit(KVM_APIC_SIPI, &apic->pending_events);
2885                 return;
2886         }
2887
2888         if (test_bit(KVM_APIC_INIT, &pe)) {
2889                 clear_bit(KVM_APIC_INIT, &apic->pending_events);
2890                 kvm_vcpu_reset(vcpu, true);
2891                 if (kvm_vcpu_is_bsp(apic->vcpu))
2892                         vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2893                 else
2894                         vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
2895         }
2896         if (test_bit(KVM_APIC_SIPI, &pe)) {
2897                 clear_bit(KVM_APIC_SIPI, &apic->pending_events);
2898                 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
2899                         /* evaluate pending_events before reading the vector */
2900                         smp_rmb();
2901                         sipi_vector = apic->sipi_vector;
2902                         kvm_x86_ops.vcpu_deliver_sipi_vector(vcpu, sipi_vector);
2903                         vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2904                 }
2905         }
2906 }
2907
2908 void kvm_lapic_exit(void)
2909 {
2910         static_key_deferred_flush(&apic_hw_disabled);
2911         static_key_deferred_flush(&apic_sw_disabled);
2912 }