// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <asm/hwcap.h>

DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);

static void aia_set_hvictl(bool ext_irq_pending)
{
	unsigned long hvictl;

	/*
	 * HVICTL.IID == 9 and HVICTL.IPRIO == 0 together represent
	 * "no interrupt" in HVICTL.
	 */

	hvictl = (IRQ_S_EXT << HVICTL_IID_SHIFT) & HVICTL_IID;
	hvictl |= ext_irq_pending;
	csr_write(CSR_HVICTL, hvictl);
}
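
/*
 * On 32-bit hosts, the upper 32 bits of the guest interrupt state are
 * held in the *H CSRs. The two helpers below keep the software shadows
 * of HVIPH and VSIEH in sync with vcpu->arch.irqs_pending[1] and the
 * real VSIEH CSR.
 */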
#ifdef CONFIG_32BIT
void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	unsigned long mask, val;

	if (!kvm_riscv_aia_available())
		return;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask[1])) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[1], 0);
		val = READ_ONCE(vcpu->arch.irqs_pending[1]) & mask;

		csr->hviph &= ~mask;
		csr->hviph |= val;
	}
}

void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (kvm_riscv_aia_available())
		csr->vsieh = csr_read(CSR_VSIEH);
}
#endif
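
/*
 * Check whether the VCPU has any interrupt from @mask pending and
 * enabled. External interrupts routed through an in-kernel IMSIC are
 * not checked here yet.
 */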
bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
	unsigned long seip;

	if (!kvm_riscv_aia_available())
		return false;

#ifdef CONFIG_32BIT
	if (READ_ONCE(vcpu->arch.irqs_pending[1]) &
	    (vcpu->arch.aia_context.guest_csr.vsieh & upper_32_bits(mask)))
		return true;
#endif

	seip = vcpu->arch.guest_csr.vsie;
	seip &= (unsigned long)mask;
	seip &= BIT(IRQ_S_EXT);

	if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
		return false;

	return false;
}
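
/*
 * Write the shadowed HVIPH value to hardware and update HVICTL to
 * reflect whether a VS-level external interrupt is pending.
 */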
void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	if (!kvm_riscv_aia_available())
		return;

#ifdef CONFIG_32BIT
	csr_write(CSR_HVIPH, vcpu->arch.aia_context.guest_csr.hviph);
#endif
	aia_set_hvictl(!!(csr->hvip & BIT(IRQ_VS_EXT)));
}
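
/*
 * Restore the guest's AIA CSR state from the software shadow into
 * hardware when the VCPU is loaded on a host CPU.
 */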
void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (!kvm_riscv_aia_available())
		return;

	csr_write(CSR_VSISELECT, csr->vsiselect);
	csr_write(CSR_HVIPRIO1, csr->hviprio1);
	csr_write(CSR_HVIPRIO2, csr->hviprio2);
#ifdef CONFIG_32BIT
	csr_write(CSR_VSIEH, csr->vsieh);
	csr_write(CSR_HVIPH, csr->hviph);
	csr_write(CSR_HVIPRIO1H, csr->hviprio1h);
	csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
#endif
}
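
/*
 * Counterpart of kvm_riscv_vcpu_aia_load(): save the guest's AIA CSR
 * state from hardware into the software shadow.
 */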
void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (!kvm_riscv_aia_available())
		return;

	csr->vsiselect = csr_read(CSR_VSISELECT);
	csr->hviprio1 = csr_read(CSR_HVIPRIO1);
	csr->hviprio2 = csr_read(CSR_HVIPRIO2);
#ifdef CONFIG_32BIT
	csr->vsieh = csr_read(CSR_VSIEH);
	csr->hviph = csr_read(CSR_HVIPH);
	csr->hviprio1h = csr_read(CSR_HVIPRIO1H);
	csr->hviprio2h = csr_read(CSR_HVIPRIO2H);
#endif
}
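
/*
 * Get/set the AIA guest CSR state as an array of unsigned longs, with
 * @reg_num as an index into struct kvm_riscv_aia_csr; used by the
 * ONE_REG interface (KVM_REG_RISCV_CSR_AIA_REG indices).
 */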
int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long *out_val)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
		return -EINVAL;

	*out_val = 0;
	if (kvm_riscv_aia_available())
		*out_val = ((unsigned long *)csr)[reg_num];

	return 0;
}

int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
			       unsigned long reg_num,
			       unsigned long val)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;

	if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (kvm_riscv_aia_available()) {
		((unsigned long *)csr)[reg_num] = val;

#ifdef CONFIG_32BIT
		if (reg_num == KVM_REG_RISCV_CSR_AIA_REG(siph))
			WRITE_ONCE(vcpu->arch.irqs_pending_mask[1], 0);
#endif
	}

	return 0;
}
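
/*
 * Emulate a read-modify-write of the guest *topei CSR by forwarding it
 * to the in-kernel IMSIC emulation.
 */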
int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
				 unsigned int csr_num,
				 unsigned long *val,
				 unsigned long new_val,
				 unsigned long wr_mask)
{
	/* If AIA is not available then redirect the trap */
	if (!kvm_riscv_aia_available())
		return KVM_INSN_ILLEGAL_TRAP;

	/* If AIA is not initialized then forward to user space */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return KVM_INSN_EXIT_TO_USER_SPACE;

	return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, KVM_RISCV_AIA_IMSIC_TOPEI,
					    val, new_val, wr_mask);
}

/*
 * The external IRQ priority is always read-only zero. This means the
 * default priority order is always preferred for external IRQs unless
 * HVICTL.IID == 9 and HVICTL.IPRIO != 0.
 */
static int aia_irq2bitpos[] = {
	0,     8,   -1,   -1,  16,  24,  -1,  -1, /* 0 - 7 */
	32,   -1,   -1,   -1,  -1,  40,  48,  56, /* 8 - 15 */
	64,   72,   80,   88,  96, 104, 112, 120, /* 16 - 23 */
	-1,   -1,   -1,   -1,  -1,  -1,  -1,  -1, /* 24 - 31 */
	-1,   -1,   -1,   -1,  -1,  -1,  -1,  -1, /* 32 - 39 */
	-1,   -1,   -1,   -1,  -1,  -1,  -1,  -1, /* 40 - 47 */
	-1,   -1,   -1,   -1,  -1,  -1,  -1,  -1, /* 48 - 55 */
	-1,   -1,   -1,   -1,  -1,  -1,  -1,  -1, /* 56 - 63 */
};
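
/*
 * aia_get_iprio8() and aia_set_iprio8() read and write the 8-bit
 * priority of @irq in the HVIPRIO* CSRs, using aia_irq2bitpos[] to
 * locate the byte holding it.
 */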
static u8 aia_get_iprio8(struct kvm_vcpu *vcpu, unsigned int irq)
{
	unsigned long hviprio;
	int bitpos = aia_irq2bitpos[irq];

	if (bitpos < 0)
		return 0;

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		hviprio = csr_read(CSR_HVIPRIO1);
		break;
	case 1:
#ifndef CONFIG_32BIT
		hviprio = csr_read(CSR_HVIPRIO2);
		break;
#else
		hviprio = csr_read(CSR_HVIPRIO1H);
		break;
	case 2:
		hviprio = csr_read(CSR_HVIPRIO2);
		break;
	case 3:
		hviprio = csr_read(CSR_HVIPRIO2H);
		break;
#endif
	default:
		return 0;
	}

	return (hviprio >> (bitpos % BITS_PER_LONG)) & TOPI_IPRIO_MASK;
}

static void aia_set_iprio8(struct kvm_vcpu *vcpu, unsigned int irq, u8 prio)
{
	unsigned long hviprio;
	int bitpos = aia_irq2bitpos[irq];

	if (bitpos < 0)
		return;

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		hviprio = csr_read(CSR_HVIPRIO1);
		break;
	case 1:
#ifndef CONFIG_32BIT
		hviprio = csr_read(CSR_HVIPRIO2);
		break;
#else
		hviprio = csr_read(CSR_HVIPRIO1H);
		break;
	case 2:
		hviprio = csr_read(CSR_HVIPRIO2);
		break;
	case 3:
		hviprio = csr_read(CSR_HVIPRIO2H);
		break;
#endif
	default:
		return;
	}

	hviprio &= ~(TOPI_IPRIO_MASK << (bitpos % BITS_PER_LONG));
	hviprio |= (unsigned long)prio << (bitpos % BITS_PER_LONG);

	switch (bitpos / BITS_PER_LONG) {
	case 0:
		csr_write(CSR_HVIPRIO1, hviprio);
		break;
	case 1:
#ifndef CONFIG_32BIT
		csr_write(CSR_HVIPRIO2, hviprio);
		break;
#else
		csr_write(CSR_HVIPRIO1H, hviprio);
		break;
	case 2:
		csr_write(CSR_HVIPRIO2, hviprio);
		break;
	case 3:
		csr_write(CSR_HVIPRIO2H, hviprio);
		break;
#endif
	default:
		return;
	}
}
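
/*
 * Emulate a read-modify-write of one iprio group: @isel selects a
 * group of 4 (RV32) or 8 (RV64) byte-sized IRQ priorities packed into
 * a single guest register value.
 */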
static int aia_rmw_iprio(struct kvm_vcpu *vcpu, unsigned int isel,
			 unsigned long *val, unsigned long new_val,
			 unsigned long wr_mask)
{
	int i, first_irq, nirqs;
	unsigned long old_val;
	u8 prio;

#ifndef CONFIG_32BIT
	if (isel & 0x1)
		return KVM_INSN_ILLEGAL_TRAP;
#endif

	nirqs = 4 * (BITS_PER_LONG / 32);
	first_irq = (isel - ISELECT_IPRIO0) * 4;

	old_val = 0;
	for (i = 0; i < nirqs; i++) {
		prio = aia_get_iprio8(vcpu, first_irq + i);
		old_val |= (unsigned long)prio << (TOPI_IPRIO_BITS * i);
	}

	if (val)
		*val = old_val;

	if (wr_mask) {
		new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
		for (i = 0; i < nirqs; i++) {
			prio = (new_val >> (TOPI_IPRIO_BITS * i)) &
				TOPI_IPRIO_MASK;
			aia_set_iprio8(vcpu, first_irq + i, prio);
		}
	}

	return KVM_INSN_CONTINUE_NEXT_SEPC;
}
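
/*
 * Emulate indirect CSR (siselect/sireg) accesses from the guest:
 * iprio selectors are handled by aia_rmw_iprio(), IMSIC selectors are
 * forwarded to the in-kernel IMSIC emulation, and anything else is
 * redirected to user space.
 */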
#define IMSIC_FIRST	0x70
#define IMSIC_LAST	0xff
int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask)
{
	unsigned int isel;

	/* If AIA is not available then redirect the trap */
	if (!kvm_riscv_aia_available())
		return KVM_INSN_ILLEGAL_TRAP;

	/* First try to emulate in kernel space */
	isel = csr_read(CSR_VSISELECT) & ISELECT_MASK;
	if (isel >= ISELECT_IPRIO0 && isel <= ISELECT_IPRIO15)
		return aia_rmw_iprio(vcpu, isel, val, new_val, wr_mask);
	else if (isel >= IMSIC_FIRST && isel <= IMSIC_LAST &&
		 kvm_riscv_aia_initialized(vcpu->kvm))
		return kvm_riscv_vcpu_aia_imsic_rmw(vcpu, isel, val, new_val,
						    wr_mask);

	/* We can't handle it here so redirect to user space */
	return KVM_INSN_EXIT_TO_USER_SPACE;
}
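
/*
 * Reset the AIA related hypervisor CSRs (HVICTL, HVIPRIO*, and their
 * *H variants on 32-bit hosts) to a clean state.
 */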
void kvm_riscv_aia_enable(void)
{
	if (!kvm_riscv_aia_available())
		return;

	aia_set_hvictl(false);
	csr_write(CSR_HVIPRIO1, 0x0);
	csr_write(CSR_HVIPRIO2, 0x0);
#ifdef CONFIG_32BIT
	csr_write(CSR_HVIPH, 0x0);
	csr_write(CSR_HIDELEGH, 0x0);
	csr_write(CSR_HVIPRIO1H, 0x0);
	csr_write(CSR_HVIPRIO2H, 0x0);
#endif
}

void kvm_riscv_aia_disable(void)
{
	if (!kvm_riscv_aia_available())
		return;

	aia_set_hvictl(false);
}

int kvm_riscv_aia_init(void)
{
	if (!riscv_isa_extension_available(NULL, SxAIA))
		return -ENODEV;

	/* Enable KVM AIA support */
	static_branch_enable(&kvm_riscv_aia_available);

	return 0;
}

void kvm_riscv_aia_exit(void)
{
}