// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *      Anup Patel <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqchip/riscv-imsic.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/cpufeature.h>
#include <asm/kvm_nacl.h>

struct aia_hgei_control {
        raw_spinlock_t lock;
        unsigned long free_bitmap;
        struct kvm_vcpu *owners[BITS_PER_LONG];
};
static DEFINE_PER_CPU(struct aia_hgei_control, aia_hgei);
static int hgei_parent_irq;

unsigned int kvm_riscv_aia_nr_hgei;
unsigned int kvm_riscv_aia_max_ids;
DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);

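/*
 * Find the guest external interrupt (HGEI) line currently assigned
 * to @owner on the local host CPU. Returns the line number or -1 if
 * @owner does not own a line on this CPU.
 */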
static int aia_find_hgei(struct kvm_vcpu *owner)
{
        int i, hgei;
        unsigned long flags;
        struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);

        raw_spin_lock_irqsave(&hgctrl->lock, flags);

        hgei = -1;
        for (i = 1; i <= kvm_riscv_aia_nr_hgei; i++) {
                if (hgctrl->owners[i] == owner) {
                        hgei = i;
                        break;
                }
        }

        raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

        put_cpu_ptr(&aia_hgei);
        return hgei;
}

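/*
 * Build the HVICTL value used to signal (or clear) a pending guest
 * external interrupt: IID is fixed to IRQ_S_EXT and the lowest IPRIO
 * bit encodes whether the interrupt is pending.
 */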
static inline unsigned long aia_hvictl_value(bool ext_irq_pending)
{
        unsigned long hvictl;

        /*
         * HVICTL.IID == 9 and HVICTL.IPRIO == 0 represents
         * no interrupt in HVICTL.
         */
        hvictl = (IRQ_S_EXT << HVICTL_IID_SHIFT) & HVICTL_IID;
        hvictl |= ext_irq_pending;
        return hvictl;
}

#ifdef CONFIG_32BIT
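/*
 * On 32-bit hosts the upper 32 interrupt bits live in separate
 * "high" CSRs. Fold pending updates from irqs_pending[1] into the
 * hviph shadow value, honoring the atomically fetched update mask.
 */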
void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
        unsigned long mask, val;

        if (!kvm_riscv_aia_available())
                return;

        if (READ_ONCE(vcpu->arch.irqs_pending_mask[1])) {
                mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[1], 0);
                val = READ_ONCE(vcpu->arch.irqs_pending[1]) & mask;

                csr->hviph &= ~mask;
                csr->hviph |= val;
        }
}

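/* Snapshot the guest's VSIEH CSR into the vCPU's AIA context. */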
void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

        if (kvm_riscv_aia_available())
                csr->vsieh = ncsr_read(CSR_VSIEH);
}
#endif

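/*
 * Check whether an AIA-managed interrupt is pending and enabled for
 * @vcpu. For the supervisor external interrupt this peeks at the
 * host HGEIP CSR using the HGEI line assigned to @vcpu.
 */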
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
        int hgei;
        unsigned long seip;

        if (!kvm_riscv_aia_available())
                return false;

#ifdef CONFIG_32BIT
        if (READ_ONCE(vcpu->arch.irqs_pending[1]) &
            (vcpu->arch.aia_context.guest_csr.vsieh & upper_32_bits(mask)))
                return true;
#endif

        seip = vcpu->arch.guest_csr.vsie;
        seip &= (unsigned long)mask;
        seip &= BIT(IRQ_S_EXT);

        if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
                return false;

        hgei = aia_find_hgei(vcpu);
        if (hgei > 0)
                return !!(ncsr_read(CSR_HGEIP) & BIT(hgei));

        return false;
}

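/*
 * Program HVICTL (and HVIPH on 32-bit hosts) so that the guest
 * observes the virtual supervisor external interrupt state tracked
 * in the hvip shadow value.
 */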
void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

        if (!kvm_riscv_aia_available())
                return;

#ifdef CONFIG_32BIT
        ncsr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
#endif
        ncsr_write(CSR_HVICTL, aia_hvictl_value(!!(csr->hvip & BIT(IRQ_VS_EXT))));
}

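/*
 * Restore the vCPU's AIA CSR state on the host CPU it is being
 * loaded on. When the nested acceleration (NACL) shared memory
 * interface supports synchronized CSR access, the writes are
 * batched through it instead of issued directly.
 */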
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
        void *nsh;

        if (!kvm_riscv_aia_available())
                return;

        if (kvm_riscv_nacl_sync_csr_available()) {
                nsh = nacl_shmem();
                nacl_csr_write(nsh, CSR_VSISELECT, csr->vsiselect);
                nacl_csr_write(nsh, CSR_HVIPRIO1, csr->hviprio1);
                nacl_csr_write(nsh, CSR_HVIPRIO2, csr->hviprio2);
#ifdef CONFIG_32BIT
                nacl_csr_write(nsh, CSR_VSIEH, csr->vsieh);
                nacl_csr_write(nsh, CSR_HVIPH, csr->hviph);
                nacl_csr_write(nsh, CSR_HVIPRIO1H, csr->hviprio1h);
                nacl_csr_write(nsh, CSR_HVIPRIO2H, csr->hviprio2h);
#endif
        } else {
                csr_write(CSR_VSISELECT, csr->vsiselect);
                csr_write(CSR_HVIPRIO1, csr->hviprio1);
                csr_write(CSR_HVIPRIO2, csr->hviprio2);
#ifdef CONFIG_32BIT
                csr_write(CSR_VSIEH, csr->vsieh);
                csr_write(CSR_HVIPH, csr->hviph);
                csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
                csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
#endif
        }
}

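/*
 * Save the vCPU's AIA CSR state when the vCPU is put, either through
 * the NACL shared memory area or via direct CSR reads.
 */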
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
        void *nsh;

        if (!kvm_riscv_aia_available())
                return;

        if (kvm_riscv_nacl_available()) {
                nsh = nacl_shmem();
                csr->vsiselect = nacl_csr_read(nsh, CSR_VSISELECT);
                csr->hviprio1 = nacl_csr_read(nsh, CSR_HVIPRIO1);
                csr->hviprio2 = nacl_csr_read(nsh, CSR_HVIPRIO2);
#ifdef CONFIG_32BIT
                csr->vsieh = nacl_csr_read(nsh, CSR_VSIEH);
                csr->hviph = nacl_csr_read(nsh, CSR_HVIPH);
                csr->hviprio1h = nacl_csr_read(nsh, CSR_HVIPRIO1H);
                csr->hviprio2h = nacl_csr_read(nsh, CSR_HVIPRIO2H);
#endif
        } else {
                csr->vsiselect = csr_read(CSR_VSISELECT);
                csr->hviprio1 = csr_read(CSR_HVIPRIO1);
                csr->hviprio2 = csr_read(CSR_HVIPRIO2);
#ifdef CONFIG_32BIT
                csr->vsieh = csr_read(CSR_VSIEH);
                csr->hviph = csr_read(CSR_HVIPH);
                csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
                csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
#endif
        }
}

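/*
 * Read one register of the ONE_REG AIA CSR interface. The register
 * number indexes struct kvm_riscv_aia_csr as an array of longs.
 */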
int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
                               unsigned long reg_num,
                               unsigned long *out_val)
{
        struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

        if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
                return -ENOENT;

        *out_val = 0;
        if (kvm_riscv_aia_available())
                *out_val = ((unsigned long *)csr)[reg_num];

        return 0;
}

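/*
 * Write one register of the ONE_REG AIA CSR interface. On 32-bit
 * hosts a write to siph also clears the pending-interrupt update
 * mask so that stale bits are not flushed over the new value.
 */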
int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
                               unsigned long reg_num,
                               unsigned long val)
{
        struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

        if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
                return -ENOENT;

        if (kvm_riscv_aia_available()) {
                ((unsigned long *)csr)[reg_num] = val;

#ifdef CONFIG_32BIT
                if (reg_num == KVM_REG_RISCV_CSR_AIA_REG(siph))
                        WRITE_ONCE(vcpu->arch.irqs_pending_mask[1], 0);
#endif
        }

        return 0;
}

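/*
 * Emulate a read-modify-write of the guest's TOPEI CSR by forwarding
 * it to the in-kernel IMSIC emulation.
 */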
int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
                                 unsigned int csr_num,
                                 unsigned long *val,
                                 unsigned long new_val,
                                 unsigned long wr_mask)
{
        /* If AIA not available then redirect trap */
        if (!kvm_riscv_aia_available())
                return KVM_INSN_ILLEGAL_TRAP;

        /* If AIA not initialized then forward to user space */
        if (!kvm_riscv_aia_initialized(vcpu->kvm))
                return KVM_INSN_EXIT_TO_USER_SPACE;

        return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, KVM_RISCV_AIA_IMSIC_TOPEI,
                                            val, new_val, wr_mask);
}

/*
 * External IRQ priority is always read-only zero. This means the
 * default priority order is always preferred for external IRQs
 * unless HVICTL.IID == 9 and HVICTL.IPRIO != 0.
 */
static int aia_irq2bitpos[] = {
0,     8,   -1,   -1,   16,   24,   -1,   -1, /* 0 - 7 */
32,   -1,   -1,   -1,   -1,   40,   48,   56, /* 8 - 15 */
64,   72,   80,   88,   96,  104,  112,  120, /* 16 - 23 */
-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 24 - 31 */
-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 32 - 39 */
-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 40 - 47 */
-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 48 - 55 */
-1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, /* 56 - 63 */
};

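/*
 * Read the 8-bit priority of @irq from the appropriate HVIPRIO CSR,
 * using the bit position given by aia_irq2bitpos[].
 */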
static u8 aia_get_iprio8(struct kvm_vcpu *vcpu, unsigned int irq)
{
        unsigned long hviprio;
        int bitpos = aia_irq2bitpos[irq];

        if (bitpos < 0)
                return 0;

        switch (bitpos / BITS_PER_LONG) {
        case 0:
                hviprio = ncsr_read(CSR_HVIPRIO1);
                break;
        case 1:
#ifndef CONFIG_32BIT
                hviprio = ncsr_read(CSR_HVIPRIO2);
                break;
#else
                hviprio = ncsr_read(CSR_HVIPRIO1H);
                break;
        case 2:
                hviprio = ncsr_read(CSR_HVIPRIO2);
                break;
        case 3:
                hviprio = ncsr_read(CSR_HVIPRIO2H);
                break;
#endif
        default:
                return 0;
        }

        return (hviprio >> (bitpos % BITS_PER_LONG)) & TOPI_IPRIO_MASK;
}

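/*
 * Write the 8-bit priority of @irq into the appropriate HVIPRIO CSR,
 * preserving the priorities of the other IRQs packed in that CSR.
 */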
static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
{
        unsigned long hviprio;
        int bitpos = aia_irq2bitpos[irq];

        if (bitpos < 0)
                return;

        switch (bitpos / BITS_PER_LONG) {
        case 0:
                hviprio = ncsr_read(CSR_HVIPRIO1);
                break;
        case 1:
#ifndef CONFIG_32BIT
                hviprio = ncsr_read(CSR_HVIPRIO2);
                break;
#else
                hviprio = ncsr_read(CSR_HVIPRIO1H);
                break;
        case 2:
                hviprio = ncsr_read(CSR_HVIPRIO2);
                break;
        case 3:
                hviprio = ncsr_read(CSR_HVIPRIO2H);
                break;
#endif
        default:
                return;
        }

        hviprio &= ~(TOPI_IPRIO_MASK << (bitpos % BITS_PER_LONG));
        hviprio |= (unsigned long)prio << (bitpos % BITS_PER_LONG);

        switch (bitpos / BITS_PER_LONG) {
        case 0:
                ncsr_write(CSR_HVIPRIO1, hviprio);
                break;
        case 1:
#ifndef CONFIG_32BIT
                ncsr_write(CSR_HVIPRIO2, hviprio);
                break;
#else
                ncsr_write(CSR_HVIPRIO1H, hviprio);
                break;
        case 2:
                ncsr_write(CSR_HVIPRIO2, hviprio);
                break;
        case 3:
                ncsr_write(CSR_HVIPRIO2H, hviprio);
                break;
#endif
        default:
                return;
        }
}

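/*
 * Emulate a read-modify-write of one IPRIO array register. Each
 * register packs 4 (RV32) or 8 (RV64) per-IRQ priority bytes, so the
 * access is decomposed into aia_get_iprio8()/aia_set_iprio8() calls.
 */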
static int aia_rmw_iprio(struct kvm_vcpu *vcpu, unsigned int isel,
                         unsigned long *val, unsigned long new_val,
                         unsigned long wr_mask)
{
        int i, first_irq, nirqs;
        unsigned long old_val;
        u8 prio;

#ifndef CONFIG_32BIT
        if (isel & 0x1)
                return KVM_INSN_ILLEGAL_TRAP;
#endif

        nirqs = 4 * (BITS_PER_LONG / 32);
        first_irq = (isel - ISELECT_IPRIO0) * 4;

        old_val = 0;
        for (i = 0; i < nirqs; i++) {
                prio = aia_get_iprio8(vcpu, first_irq + i);
                old_val |= (unsigned long)prio << (TOPI_IPRIO_BITS * i);
        }

        if (val)
                *val = old_val;

        if (wr_mask) {
                new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
                for (i = 0; i < nirqs; i++) {
                        prio = (new_val >> (TOPI_IPRIO_BITS * i)) &
                                TOPI_IPRIO_MASK;
                        aia_set_iprio8(vcpu, first_irq + i, prio);
                }
        }

        return KVM_INSN_CONTINUE_NEXT_SEPC;
}

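/*
 * Emulate a read-modify-write of the AIA register selected through
 * VSISELECT: IPRIO ranges are handled by aia_rmw_iprio(), IMSIC
 * ranges are forwarded to the in-kernel IMSIC emulation, and
 * anything else exits to user space.
 */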
int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
                                unsigned long *val, unsigned long new_val,
                                unsigned long wr_mask)
{
        unsigned int isel;

        /* If AIA not available then redirect trap */
        if (!kvm_riscv_aia_available())
                return KVM_INSN_ILLEGAL_TRAP;

        /* First try to emulate in kernel space */
        isel = ncsr_read(CSR_VSISELECT) & ISELECT_MASK;
        if (isel >= ISELECT_IPRIO0 && isel <= ISELECT_IPRIO15)
                return aia_rmw_iprio(vcpu, isel, val, new_val, wr_mask);
        else if (isel >= IMSIC_FIRST && isel <= IMSIC_LAST &&
                 kvm_riscv_aia_initialized(vcpu->kvm))
                return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, isel, val, new_val,
                                                    wr_mask);

        /* We can't handle it here so redirect to user space */
        return KVM_INSN_EXIT_TO_USER_SPACE;
}

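/*
 * Allocate a guest external interrupt line on @cpu for @owner and,
 * when an IMSIC is present, return the virtual and physical address
 * of the IMSIC guest MSI page backing that line.
 */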
int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
                             void __iomem **hgei_va, phys_addr_t *hgei_pa)
{
        int ret = -ENOENT;
        unsigned long flags;
        const struct imsic_global_config *gc;
        const struct imsic_local_config *lc;
        struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);

        if (!kvm_riscv_aia_available() || !hgctrl)
                return -ENODEV;

        raw_spin_lock_irqsave(&hgctrl->lock, flags);

        if (hgctrl->free_bitmap) {
                ret = __ffs(hgctrl->free_bitmap);
                hgctrl->free_bitmap &= ~BIT(ret);
                hgctrl->owners[ret] = owner;
        }

        raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

        gc = imsic_get_global_config();
        lc = (gc) ? per_cpu_ptr(gc->local, cpu) : NULL;
        if (lc && ret > 0) {
                if (hgei_va)
                        *hgei_va = lc->msi_va + (ret * IMSIC_MMIO_PAGE_SZ);
                if (hgei_pa)
                        *hgei_pa = lc->msi_pa + (ret * IMSIC_MMIO_PAGE_SZ);
        }

        return ret;
}

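/*
 * Release a guest external interrupt line previously allocated with
 * kvm_riscv_aia_alloc_hgei().
 */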
void kvm_riscv_aia_free_hgei(int cpu, int hgei)
{
        unsigned long flags;
        struct aia_hgei_control *hgctrl = per_cpu_ptr(&aia_hgei, cpu);

        if (!kvm_riscv_aia_available() || !hgctrl)
                return;

        raw_spin_lock_irqsave(&hgctrl->lock, flags);

        if (hgei > 0 && hgei <= kvm_riscv_aia_nr_hgei) {
                if (!(hgctrl->free_bitmap & BIT(hgei))) {
                        hgctrl->free_bitmap |= BIT(hgei);
                        hgctrl->owners[hgei] = NULL;
                }
        }

        raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
}

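/*
 * Enable or disable wake-up of @owner's host CPU when its HGEI line
 * becomes pending, by setting or clearing the line's HGEIE bit.
 */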
void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable)
{
        int hgei;

        if (!kvm_riscv_aia_available())
                return;

        hgei = aia_find_hgei(owner);
        if (hgei > 0) {
                if (enable)
                        csr_set(CSR_HGEIE, BIT(hgei));
                else
                        csr_clear(CSR_HGEIE, BIT(hgei));
        }
}

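/*
 * Per-CPU SGEI handler: mask the guest external interrupt lines that
 * fired and kick the vCPUs owning them.
 */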
static irqreturn_t hgei_interrupt(int irq, void *dev_id)
{
        int i;
        unsigned long hgei_mask, flags;
        struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);

        hgei_mask = csr_read(CSR_HGEIP) & csr_read(CSR_HGEIE);
        csr_clear(CSR_HGEIE, hgei_mask);

        raw_spin_lock_irqsave(&hgctrl->lock, flags);

        for_each_set_bit(i, &hgei_mask, BITS_PER_LONG) {
                if (hgctrl->owners[i])
                        kvm_vcpu_kick(hgctrl->owners[i]);
        }

        raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

        put_cpu_ptr(&aia_hgei);
        return IRQ_HANDLED;
}

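/*
 * Set up HGEI line management: initialize each CPU's free-line
 * bitmap (line 0 is reserved) and request the per-CPU SGEI interrupt
 * that signals pending guest external interrupts.
 */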
static int aia_hgei_init(void)
{
        int cpu, rc;
        struct irq_domain *domain;
        struct aia_hgei_control *hgctrl;

        /* Initialize per-CPU guest external interrupt line management */
        for_each_possible_cpu(cpu) {
                hgctrl = per_cpu_ptr(&aia_hgei, cpu);
                raw_spin_lock_init(&hgctrl->lock);
                if (kvm_riscv_aia_nr_hgei) {
                        hgctrl->free_bitmap =
                                BIT(kvm_riscv_aia_nr_hgei + 1) - 1;
                        hgctrl->free_bitmap &= ~BIT(0);
                } else
                        hgctrl->free_bitmap = 0;
        }

        /* Skip SGEI interrupt setup for zero guest external interrupts */
        if (!kvm_riscv_aia_nr_hgei)
                goto skip_sgei_interrupt;

        /* Find INTC irq domain */
        domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
                                          DOMAIN_BUS_ANY);
        if (!domain) {
                kvm_err("unable to find INTC domain\n");
                return -ENOENT;
        }

        /* Map per-CPU SGEI interrupt from INTC domain */
        hgei_parent_irq = irq_create_mapping(domain, IRQ_S_GEXT);
        if (!hgei_parent_irq) {
                kvm_err("unable to map SGEI IRQ\n");
                return -ENOMEM;
        }

        /* Request per-CPU SGEI interrupt */
        rc = request_percpu_irq(hgei_parent_irq, hgei_interrupt,
                                "riscv-kvm", &aia_hgei);
        if (rc) {
                kvm_err("failed to request SGEI IRQ\n");
                return rc;
        }

skip_sgei_interrupt:
        return 0;
}

static void aia_hgei_exit(void)
{
        /* Do nothing for zero guest external interrupts */
        if (!kvm_riscv_aia_nr_hgei)
                return;

        /* Free per-CPU SGEI interrupt */
        free_percpu_irq(hgei_parent_irq, &aia_hgei);
}

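/*
 * Initialize this CPU's AIA CSR state for running guests and enable
 * the per-CPU SGEI interrupt.
 */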
void kvm_riscv_aia_enable(void)
{
        if (!kvm_riscv_aia_available())
                return;

        csr_write(CSR_HVICTL, aia_hvictl_value(false));
        csr_write(CSR_HVIPRIO1, 0x0);
        csr_write(CSR_HVIPRIO2, 0x0);
#ifdef CONFIG_32BIT
        csr_write(CSR_HVIPH, 0x0);
        csr_write(CSR_HIDELEGH, 0x0);
        csr_write(CSR_HVIPRIO1H, 0x0);
        csr_write(CSR_HVIPRIO2H, 0x0);
#endif

        /* Enable per-CPU SGEI interrupt */
        enable_percpu_irq(hgei_parent_irq,
                          irq_get_trigger_type(hgei_parent_irq));
        csr_set(CSR_HIE, BIT(IRQ_S_GEXT));
        /* Enable IRQ filtering for overflow interrupt only if sscofpmf is present */
        if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
                csr_set(CSR_HVIEN, BIT(IRQ_PMU_OVF));
}

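/*
 * Tear down this CPU's AIA state: disable the SGEI interrupt, then
 * release the IMSIC state of every vCPU owning an HGEI line here and
 * kick it so it can resume on another hart.
 */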
void kvm_riscv_aia_disable(void)
{
        int i;
        unsigned long flags;
        struct kvm_vcpu *vcpu;
        struct aia_hgei_control *hgctrl;

        if (!kvm_riscv_aia_available())
                return;
        hgctrl = get_cpu_ptr(&aia_hgei);

        if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_SSCOFPMF))
                csr_clear(CSR_HVIEN, BIT(IRQ_PMU_OVF));
        /* Disable per-CPU SGEI interrupt */
        csr_clear(CSR_HIE, BIT(IRQ_S_GEXT));
        disable_percpu_irq(hgei_parent_irq);

        csr_write(CSR_HVICTL, aia_hvictl_value(false));

        raw_spin_lock_irqsave(&hgctrl->lock, flags);

        for (i = 0; i <= kvm_riscv_aia_nr_hgei; i++) {
                vcpu = hgctrl->owners[i];
                if (!vcpu)
                        continue;

                /*
                 * We release hgctrl->lock before notifying IMSIC
                 * so that we don't have lock ordering issues.
                 */
                raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

                /* Notify IMSIC */
                kvm_riscv_vcpu_aia_imsic_release(vcpu);

                /*
                 * Wakeup VCPU if it was blocked so that it can
                 * run on other HARTs
                 */
                if (csr_read(CSR_HGEIE) & BIT(i)) {
                        csr_clear(CSR_HGEIE, BIT(i));
                        kvm_vcpu_kick(vcpu);
                }

                raw_spin_lock_irqsave(&hgctrl->lock, flags);
        }

        raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

        put_cpu_ptr(&aia_hgei);
}

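/*
 * Probe AIA support and set up global state: the number of usable
 * HGEI lines, the number of guest MSI IDs, per-CPU HGEI management,
 * and the KVM AIA device ops.
 */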
int kvm_riscv_aia_init(void)
{
        int rc;
        const struct imsic_global_config *gc;

        if (!riscv_isa_extension_available(NULL, SxAIA))
                return -ENODEV;
        gc = imsic_get_global_config();

        /* Figure out the number of bits in HGEIE */
        csr_write(CSR_HGEIE, -1UL);
        kvm_riscv_aia_nr_hgei = fls_long(csr_read(CSR_HGEIE));
        csr_write(CSR_HGEIE, 0);
        if (kvm_riscv_aia_nr_hgei)
                kvm_riscv_aia_nr_hgei--;

        /*
         * The number of usable HGEI lines should be the minimum of
         * per-HART IMSIC guest files and the number of bits in HGEIE.
         */
        if (gc)
                kvm_riscv_aia_nr_hgei = min((ulong)kvm_riscv_aia_nr_hgei,
                                            BIT(gc->guest_index_bits) - 1);
        else
                kvm_riscv_aia_nr_hgei = 0;

        /* Find number of guest MSI IDs */
        kvm_riscv_aia_max_ids = IMSIC_MAX_ID;
        if (gc && kvm_riscv_aia_nr_hgei)
                kvm_riscv_aia_max_ids = gc->nr_guest_ids + 1;

        /* Initialize guest external interrupt line management */
        rc = aia_hgei_init();
        if (rc)
                return rc;

        /* Register device operations */
        rc = kvm_register_device_ops(&kvm_riscv_aia_device_ops,
                                     KVM_DEV_TYPE_RISCV_AIA);
        if (rc) {
                aia_hgei_exit();
                return rc;
        }

        /* Enable KVM AIA support */
        static_branch_enable(&kvm_riscv_aia_available);

        return 0;
}

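/* Undo kvm_riscv_aia_init(): unregister device ops and free HGEI state. */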
void kvm_riscv_aia_exit(void)
{
        if (!kvm_riscv_aia_available())
                return;

        /* Unregister device operations */
        kvm_unregister_device_ops(KVM_DEV_TYPE_RISCV_AIA);

        /* Cleanup the HGEI state */
        aia_hgei_exit();
}