linux.git: arch/riscv/kvm/vcpu.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 Western Digital Corporation or its affiliates.
4  *
5  * Authors:
6  *     Anup Patel <[email protected]>
7  */
8
9 #include <linux/bitops.h>
10 #include <linux/entry-kvm.h>
11 #include <linux/errno.h>
12 #include <linux/err.h>
13 #include <linux/kdebug.h>
14 #include <linux/module.h>
15 #include <linux/percpu.h>
16 #include <linux/uaccess.h>
17 #include <linux/vmalloc.h>
18 #include <linux/sched/signal.h>
19 #include <linux/fs.h>
20 #include <linux/kvm_host.h>
21 #include <asm/csr.h>
22 #include <asm/cacheflush.h>
23 #include <asm/hwcap.h>
24 #include <asm/sbi.h>
25
26 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
27         KVM_GENERIC_VCPU_STATS(),
28         STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
29         STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
30         STATS_DESC_COUNTER(VCPU, mmio_exit_user),
31         STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
32         STATS_DESC_COUNTER(VCPU, csr_exit_user),
33         STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
34         STATS_DESC_COUNTER(VCPU, signal_exits),
35         STATS_DESC_COUNTER(VCPU, exits)
36 };
37
38 const struct kvm_stats_header kvm_vcpu_stats_header = {
39         .name_size = KVM_STATS_NAME_SIZE,
40         .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
41         .id_offset = sizeof(struct kvm_stats_header),
42         .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
43         .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
44                        sizeof(kvm_vcpu_stats_desc),
45 };
46
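/* Bit mask covering the 26 single-letter (base) ISA extensions 'a' through 'z' */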
47 #define KVM_RISCV_BASE_ISA_MASK         GENMASK(25, 0)
48
49 #define KVM_ISA_EXT_ARR(ext)            [KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
50
51 /* Mapping between KVM ISA Extension ID & Host ISA extension ID */
52 static const unsigned long kvm_isa_ext_arr[] = {
53         [KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
54         [KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
55         [KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
56         [KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
57         [KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
58         [KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
59         [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
60
61         KVM_ISA_EXT_ARR(SSAIA),
62         KVM_ISA_EXT_ARR(SSTC),
63         KVM_ISA_EXT_ARR(SVINVAL),
64         KVM_ISA_EXT_ARR(SVPBMT),
65         KVM_ISA_EXT_ARR(ZBB),
66         KVM_ISA_EXT_ARR(ZIHINTPAUSE),
67         KVM_ISA_EXT_ARR(ZICBOM),
68         KVM_ISA_EXT_ARR(ZICBOZ),
69 };
70
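/*
 * Map a host (base) ISA extension number to its KVM ISA extension ID;
 * returns KVM_RISCV_ISA_EXT_MAX if there is no mapping.
 */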
71 static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
72 {
73         unsigned long i;
74
75         for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
76                 if (kvm_isa_ext_arr[i] == base_ext)
77                         return i;
78         }
79
80         return KVM_RISCV_ISA_EXT_MAX;
81 }
82
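/* Check whether user space is allowed to enable the given ISA extension */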
83 static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
84 {
85         switch (ext) {
86         case KVM_RISCV_ISA_EXT_H:
87                 return false;
88         default:
89                 break;
90         }
91
92         return true;
93 }
94
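/* Check whether user space is allowed to disable the given ISA extension */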
95 static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
96 {
97         switch (ext) {
98         case KVM_RISCV_ISA_EXT_A:
99         case KVM_RISCV_ISA_EXT_C:
100         case KVM_RISCV_ISA_EXT_I:
101         case KVM_RISCV_ISA_EXT_M:
102         case KVM_RISCV_ISA_EXT_SSAIA:
103         case KVM_RISCV_ISA_EXT_SSTC:
104         case KVM_RISCV_ISA_EXT_SVINVAL:
105         case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
106         case KVM_RISCV_ISA_EXT_ZBB:
107                 return false;
108         default:
109                 break;
110         }
111
112         return true;
113 }
114
115 static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
116 {
117         struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
118         struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
119         struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
120         struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
121         bool loaded;
122
123         /*
124          * Preemption must be disabled here because this races with
125          * kvm_sched_out()/kvm_sched_in() (called from preempt notifiers),
126          * which also call vcpu_load()/vcpu_put().
127          */
128         get_cpu();
129         loaded = (vcpu->cpu != -1);
130         if (loaded)
131                 kvm_arch_vcpu_put(vcpu);
132
133         vcpu->arch.last_exit_cpu = -1;
134
135         memcpy(csr, reset_csr, sizeof(*csr));
136
137         memcpy(cntx, reset_cntx, sizeof(*cntx));
138
139         kvm_riscv_vcpu_fp_reset(vcpu);
140
141         kvm_riscv_vcpu_timer_reset(vcpu);
142
143         kvm_riscv_vcpu_aia_reset(vcpu);
144
145         bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
146         bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
147
148         kvm_riscv_vcpu_pmu_reset(vcpu);
149
150         vcpu->arch.hfence_head = 0;
151         vcpu->arch.hfence_tail = 0;
152         memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));
153
154         /* Reset the guest CSRs for the hotplug use case */
155         if (loaded)
156                 kvm_arch_vcpu_load(vcpu, smp_processor_id());
157         put_cpu();
158 }
159
160 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
161 {
162         return 0;
163 }
164
165 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
166 {
167         int rc;
168         struct kvm_cpu_context *cntx;
169         struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
170         unsigned long host_isa, i;
171
172         /* Mark this VCPU as never having run */
173         vcpu->arch.ran_atleast_once = false;
174         vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
175         bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
176
177         /* Setup ISA features available to VCPU */
178         for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
179                 host_isa = kvm_isa_ext_arr[i];
180                 if (__riscv_isa_extension_available(NULL, host_isa) &&
181                     kvm_riscv_vcpu_isa_enable_allowed(i))
182                         set_bit(host_isa, vcpu->arch.isa);
183         }
184
185         /* Setup vendor, arch, and implementation details */
186         vcpu->arch.mvendorid = sbi_get_mvendorid();
187         vcpu->arch.marchid = sbi_get_marchid();
188         vcpu->arch.mimpid = sbi_get_mimpid();
189
190         /* Setup VCPU hfence queue */
191         spin_lock_init(&vcpu->arch.hfence_lock);
192
193         /* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
194         cntx = &vcpu->arch.guest_reset_context;
195         cntx->sstatus = SR_SPP | SR_SPIE;
196         cntx->hstatus = 0;
197         cntx->hstatus |= HSTATUS_VTW;
198         cntx->hstatus |= HSTATUS_SPVP;
199         cntx->hstatus |= HSTATUS_SPV;
200
201         /* By default, make CY, TM, and IR counters accessible in VU mode */
202         reset_csr->scounteren = 0x7;
203
204         /* Setup VCPU timer */
205         kvm_riscv_vcpu_timer_init(vcpu);
206
207         /* Setup performance monitoring */
208         kvm_riscv_vcpu_pmu_init(vcpu);
209
210         /* Setup VCPU AIA */
211         rc = kvm_riscv_vcpu_aia_init(vcpu);
212         if (rc)
213                 return rc;
214
215         /* Reset VCPU */
216         kvm_riscv_reset_vcpu(vcpu);
217
218         return 0;
219 }
220
221 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
222 {
223         /*
224          * The vcpu with id 0 is the designated boot cpu.
225          * Keep all vcpus with non-zero id in power-off state so that
226          * they can be brought up using the SBI HSM extension.
227          */
228         if (vcpu->vcpu_idx != 0)
229                 kvm_riscv_vcpu_power_off(vcpu);
230 }
231
232 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
233 {
234         /* Cleanup VCPU AIA context */
235         kvm_riscv_vcpu_aia_deinit(vcpu);
236
237         /* Cleanup VCPU timer */
238         kvm_riscv_vcpu_timer_deinit(vcpu);
239
240         kvm_riscv_vcpu_pmu_deinit(vcpu);
241
242         /* Free unused pages pre-allocated for G-stage page table mappings */
243         kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
244 }
245
246 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
247 {
248         return kvm_riscv_vcpu_timer_pending(vcpu);
249 }
250
251 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
252 {
253 }
254
255 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
256 {
257 }
258
259 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
260 {
261         return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
262                 !vcpu->arch.power_off && !vcpu->arch.pause);
263 }
264
265 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
266 {
267         return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
268 }
269
270 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
271 {
272         return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
273 }
274
275 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
276 {
277         return VM_FAULT_SIGBUS;
278 }
279
280 static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
281                                          const struct kvm_one_reg *reg)
282 {
283         unsigned long __user *uaddr =
284                         (unsigned long __user *)(unsigned long)reg->addr;
285         unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
286                                             KVM_REG_SIZE_MASK |
287                                             KVM_REG_RISCV_CONFIG);
288         unsigned long reg_val;
289
290         if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
291                 return -EINVAL;
292
293         switch (reg_num) {
294         case KVM_REG_RISCV_CONFIG_REG(isa):
295                 reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
296                 break;
297         case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
298                 if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
299                         return -EINVAL;
300                 reg_val = riscv_cbom_block_size;
301                 break;
302         case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
303                 if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
304                         return -EINVAL;
305                 reg_val = riscv_cboz_block_size;
306                 break;
307         case KVM_REG_RISCV_CONFIG_REG(mvendorid):
308                 reg_val = vcpu->arch.mvendorid;
309                 break;
310         case KVM_REG_RISCV_CONFIG_REG(marchid):
311                 reg_val = vcpu->arch.marchid;
312                 break;
313         case KVM_REG_RISCV_CONFIG_REG(mimpid):
314                 reg_val = vcpu->arch.mimpid;
315                 break;
316         default:
317                 return -EINVAL;
318         }
319
320         if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
321                 return -EFAULT;
322
323         return 0;
324 }
325
326 static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
327                                          const struct kvm_one_reg *reg)
328 {
329         unsigned long __user *uaddr =
330                         (unsigned long __user *)(unsigned long)reg->addr;
331         unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
332                                             KVM_REG_SIZE_MASK |
333                                             KVM_REG_RISCV_CONFIG);
334         unsigned long i, isa_ext, reg_val;
335
336         if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
337                 return -EINVAL;
338
339         if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
340                 return -EFAULT;
341
342         switch (reg_num) {
343         case KVM_REG_RISCV_CONFIG_REG(isa):
344                 /*
345                  * This ONE_REG interface is only defined for
346                  * single-letter extensions.
347                  */
348                 if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
349                         return -EINVAL;
350
351                 if (!vcpu->arch.ran_atleast_once) {
352                         /* Ignore the enable/disable request for certain extensions */
353                         for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
354                                 isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
355                                 if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
356                                         reg_val &= ~BIT(i);
357                                         continue;
358                                 }
359                                 if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
360                                         if (reg_val & BIT(i))
361                                                 reg_val &= ~BIT(i);
362                                 if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
363                                         if (!(reg_val & BIT(i)))
364                                                 reg_val |= BIT(i);
365                         }
366                         reg_val &= riscv_isa_extension_base(NULL);
367                         /* Do not modify anything beyond single letter extensions */
368                         reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
369                                   (reg_val & KVM_RISCV_BASE_ISA_MASK);
370                         vcpu->arch.isa[0] = reg_val;
371                         kvm_riscv_vcpu_fp_reset(vcpu);
372                 } else {
373                         return -EOPNOTSUPP;
374                 }
375                 break;
376         case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
377                 return -EOPNOTSUPP;
378         case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
379                 return -EOPNOTSUPP;
380         case KVM_REG_RISCV_CONFIG_REG(mvendorid):
381                 if (!vcpu->arch.ran_atleast_once)
382                         vcpu->arch.mvendorid = reg_val;
383                 else
384                         return -EBUSY;
385                 break;
386         case KVM_REG_RISCV_CONFIG_REG(marchid):
387                 if (!vcpu->arch.ran_atleast_once)
388                         vcpu->arch.marchid = reg_val;
389                 else
390                         return -EBUSY;
391                 break;
392         case KVM_REG_RISCV_CONFIG_REG(mimpid):
393                 if (!vcpu->arch.ran_atleast_once)
394                         vcpu->arch.mimpid = reg_val;
395                 else
396                         return -EBUSY;
397                 break;
398         default:
399                 return -EINVAL;
400         }
401
402         return 0;
403 }
404
405 static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
406                                        const struct kvm_one_reg *reg)
407 {
408         struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
409         unsigned long __user *uaddr =
410                         (unsigned long __user *)(unsigned long)reg->addr;
411         unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
412                                             KVM_REG_SIZE_MASK |
413                                             KVM_REG_RISCV_CORE);
414         unsigned long reg_val;
415
416         if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
417                 return -EINVAL;
418         if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
419                 return -EINVAL;
420
421         if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
422                 reg_val = cntx->sepc;
423         else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
424                  reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
425                 reg_val = ((unsigned long *)cntx)[reg_num];
426         else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
427                 reg_val = (cntx->sstatus & SR_SPP) ?
428                                 KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
429         else
430                 return -EINVAL;
431
432         if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
433                 return -EFAULT;
434
435         return 0;
436 }
437
438 static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
439                                        const struct kvm_one_reg *reg)
440 {
441         struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
442         unsigned long __user *uaddr =
443                         (unsigned long __user *)(unsigned long)reg->addr;
444         unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
445                                             KVM_REG_SIZE_MASK |
446                                             KVM_REG_RISCV_CORE);
447         unsigned long reg_val;
448
449         if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
450                 return -EINVAL;
451         if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
452                 return -EINVAL;
453
454         if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
455                 return -EFAULT;
456
457         if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
458                 cntx->sepc = reg_val;
459         else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
460                  reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
461                 ((unsigned long *)cntx)[reg_num] = reg_val;
462         else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
463                 if (reg_val == KVM_RISCV_MODE_S)
464                         cntx->sstatus |= SR_SPP;
465                 else
466                         cntx->sstatus &= ~SR_SPP;
467         } else
468                 return -EINVAL;
469
470         return 0;
471 }
472
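/*
 * Read a general CSR for the ONE_REG interface. The sip value is
 * synthesized from the shadow HVIP after flushing pending interrupts.
 */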
473 static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
474                                           unsigned long reg_num,
475                                           unsigned long *out_val)
476 {
477         struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
478
479         if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
480                 return -EINVAL;
481
482         if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
483                 kvm_riscv_vcpu_flush_interrupts(vcpu);
484                 *out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
485                 *out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
486         } else
487                 *out_val = ((unsigned long *)csr)[reg_num];
488
489         return 0;
490 }
491
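/*
 * Write a general CSR for the ONE_REG interface. A sip write is
 * translated into the corresponding shadow HVIP bits.
 */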
492 static inline int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
493                                                  unsigned long reg_num,
494                                                  unsigned long reg_val)
495 {
496         struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
497
498         if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
499                 return -EINVAL;
500
501         if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
502                 reg_val &= VSIP_VALID_MASK;
503                 reg_val <<= VSIP_TO_HVIP_SHIFT;
504         }
505
506         ((unsigned long *)csr)[reg_num] = reg_val;
507
508         if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
509                 WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);
510
511         return 0;
512 }
513
514 static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
515                                       const struct kvm_one_reg *reg)
516 {
517         int rc;
518         unsigned long __user *uaddr =
519                         (unsigned long __user *)(unsigned long)reg->addr;
520         unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
521                                             KVM_REG_SIZE_MASK |
522                                             KVM_REG_RISCV_CSR);
523         unsigned long reg_val, reg_subtype;
524
525         if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
526                 return -EINVAL;
527
528         reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
529         reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
530         switch (reg_subtype) {
531         case KVM_REG_RISCV_CSR_GENERAL:
532                 rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
533                 break;
534         case KVM_REG_RISCV_CSR_AIA:
535                 rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
536                 break;
537         default:
538                 rc = -EINVAL;
539                 break;
540         }
541         if (rc)
542                 return rc;
543
544         if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
545                 return -EFAULT;
546
547         return 0;
548 }
549
550 static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
551                                       const struct kvm_one_reg *reg)
552 {
553         int rc;
554         unsigned long __user *uaddr =
555                         (unsigned long __user *)(unsigned long)reg->addr;
556         unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
557                                             KVM_REG_SIZE_MASK |
558                                             KVM_REG_RISCV_CSR);
559         unsigned long reg_val, reg_subtype;
560
561         if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
562                 return -EINVAL;
563
564         if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
565                 return -EFAULT;
566
567         reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
568         reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
569         switch (reg_subtype) {
570         case KVM_REG_RISCV_CSR_GENERAL:
571                 rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
572                 break;
573         case KVM_REG_RISCV_CSR_AIA:
574                 rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
575                 break;
576         default:
577                 rc = -EINVAL;
578                 break;
579         }
580         if (rc)
581                 return rc;
582
583         return 0;
584 }
585
586 static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
587                                           const struct kvm_one_reg *reg)
588 {
589         unsigned long __user *uaddr =
590                         (unsigned long __user *)(unsigned long)reg->addr;
591         unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
592                                             KVM_REG_SIZE_MASK |
593                                             KVM_REG_RISCV_ISA_EXT);
594         unsigned long reg_val = 0;
595         unsigned long host_isa_ext;
596
597         if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
598                 return -EINVAL;
599
600         if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
601             reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
602                 return -EINVAL;
603
604         host_isa_ext = kvm_isa_ext_arr[reg_num];
605         if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
606                 reg_val = 1; /* Mark the given extension as available */
607
608         if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
609                 return -EFAULT;
610
611         return 0;
612 }
613
614 static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
615                                           const struct kvm_one_reg *reg)
616 {
617         unsigned long __user *uaddr =
618                         (unsigned long __user *)(unsigned long)reg->addr;
619         unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
620                                             KVM_REG_SIZE_MASK |
621                                             KVM_REG_RISCV_ISA_EXT);
622         unsigned long reg_val;
623         unsigned long host_isa_ext;
624
625         if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
626                 return -EINVAL;
627
628         if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
629             reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
630                 return -EINVAL;
631
632         if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
633                 return -EFAULT;
634
635         host_isa_ext = kvm_isa_ext_arr[reg_num];
636         if (!__riscv_isa_extension_available(NULL, host_isa_ext))
637                 return -EOPNOTSUPP;
638
639         if (!vcpu->arch.ran_atleast_once) {
640                 /*
641                  * All multi-letter extensions and a few single-letter
642                  * extensions can be disabled.
643                  */
644                 if (reg_val == 1 &&
645                     kvm_riscv_vcpu_isa_enable_allowed(reg_num))
646                         set_bit(host_isa_ext, vcpu->arch.isa);
647                 else if (!reg_val &&
648                          kvm_riscv_vcpu_isa_disable_allowed(reg_num))
649                         clear_bit(host_isa_ext, vcpu->arch.isa);
650                 else
651                         return -EINVAL;
652                 kvm_riscv_vcpu_fp_reset(vcpu);
653         } else {
654                 return -EOPNOTSUPP;
655         }
656
657         return 0;
658 }
659
660 static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
661                                   const struct kvm_one_reg *reg)
662 {
663         switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
664         case KVM_REG_RISCV_CONFIG:
665                 return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
666         case KVM_REG_RISCV_CORE:
667                 return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
668         case KVM_REG_RISCV_CSR:
669                 return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
670         case KVM_REG_RISCV_TIMER:
671                 return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
672         case KVM_REG_RISCV_FP_F:
673                 return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
674                                                  KVM_REG_RISCV_FP_F);
675         case KVM_REG_RISCV_FP_D:
676                 return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
677                                                  KVM_REG_RISCV_FP_D);
678         case KVM_REG_RISCV_ISA_EXT:
679                 return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
680         case KVM_REG_RISCV_SBI_EXT:
681                 return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
682         default:
683                 break;
684         }
685
686         return -EINVAL;
687 }
688
689 static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
690                                   const struct kvm_one_reg *reg)
691 {
692         switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
693         case KVM_REG_RISCV_CONFIG:
694                 return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
695         case KVM_REG_RISCV_CORE:
696                 return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
697         case KVM_REG_RISCV_CSR:
698                 return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
699         case KVM_REG_RISCV_TIMER:
700                 return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
701         case KVM_REG_RISCV_FP_F:
702                 return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
703                                                  KVM_REG_RISCV_FP_F);
704         case KVM_REG_RISCV_FP_D:
705                 return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
706                                                  KVM_REG_RISCV_FP_D);
707         case KVM_REG_RISCV_ISA_EXT:
708                 return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
709         case KVM_REG_RISCV_SBI_EXT:
710                 return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
711         default:
712                 break;
713         }
714
715         return -EINVAL;
716 }
717
718 long kvm_arch_vcpu_async_ioctl(struct file *filp,
719                                unsigned int ioctl, unsigned long arg)
720 {
721         struct kvm_vcpu *vcpu = filp->private_data;
722         void __user *argp = (void __user *)arg;
723
724         if (ioctl == KVM_INTERRUPT) {
725                 struct kvm_interrupt irq;
726
727                 if (copy_from_user(&irq, argp, sizeof(irq)))
728                         return -EFAULT;
729
730                 if (irq.irq == KVM_INTERRUPT_SET)
731                         return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
732                 else
733                         return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
734         }
735
736         return -ENOIOCTLCMD;
737 }
738
739 long kvm_arch_vcpu_ioctl(struct file *filp,
740                          unsigned int ioctl, unsigned long arg)
741 {
742         struct kvm_vcpu *vcpu = filp->private_data;
743         void __user *argp = (void __user *)arg;
744         long r = -EINVAL;
745
746         switch (ioctl) {
747         case KVM_SET_ONE_REG:
748         case KVM_GET_ONE_REG: {
749                 struct kvm_one_reg reg;
750
751                 r = -EFAULT;
752                 if (copy_from_user(&reg, argp, sizeof(reg)))
753                         break;
754
755                 if (ioctl == KVM_SET_ONE_REG)
756                         r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
757                 else
758                         r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
759                 break;
760         }
761         default:
762                 break;
763         }
764
765         return r;
766 }
767
768 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
769                                   struct kvm_sregs *sregs)
770 {
771         return -EINVAL;
772 }
773
774 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
775                                   struct kvm_sregs *sregs)
776 {
777         return -EINVAL;
778 }
779
780 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
781 {
782         return -EINVAL;
783 }
784
785 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
786 {
787         return -EINVAL;
788 }
789
790 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
791                                   struct kvm_translation *tr)
792 {
793         return -EINVAL;
794 }
795
796 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
797 {
798         return -EINVAL;
799 }
800
801 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
802 {
803         return -EINVAL;
804 }
805
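/* Fold asynchronously updated pending interrupts into the shadow HVIP value */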
806 void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
807 {
808         struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
809         unsigned long mask, val;
810
811         if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) {
812                 mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0);
813                 val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask;
814
815                 csr->hvip &= ~mask;
816                 csr->hvip |= val;
817         }
818
819         /* Flush AIA high interrupts */
820         kvm_riscv_vcpu_aia_flush_interrupts(vcpu);
821 }
822
823 void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
824 {
825         unsigned long hvip;
826         struct kvm_vcpu_arch *v = &vcpu->arch;
827         struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
828
829         /* Read current HVIP and VSIE CSRs */
830         csr->vsie = csr_read(CSR_VSIE);
831
832         /* Sync up HVIP.VSSIP bit changes done by the Guest */
833         hvip = csr_read(CSR_HVIP);
834         if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
835                 if (hvip & (1UL << IRQ_VS_SOFT)) {
836                         if (!test_and_set_bit(IRQ_VS_SOFT,
837                                               v->irqs_pending_mask))
838                                 set_bit(IRQ_VS_SOFT, v->irqs_pending);
839                 } else {
840                         if (!test_and_set_bit(IRQ_VS_SOFT,
841                                               v->irqs_pending_mask))
842                                 clear_bit(IRQ_VS_SOFT, v->irqs_pending);
843                 }
844         }
845
846         /* Sync up AIA high interrupts */
847         kvm_riscv_vcpu_aia_sync_interrupts(vcpu);
848
849         /* Sync up timer CSRs */
850         kvm_riscv_vcpu_timer_sync(vcpu);
851 }
852
853 int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
854 {
855         /*
856          * We only allow VS-mode software, timer, and external
857          * interrupts when irq is one of the local interrupts
858          * defined by the RISC-V privileged specification.
859          */
860         if (irq < IRQ_LOCAL_MAX &&
861             irq != IRQ_VS_SOFT &&
862             irq != IRQ_VS_TIMER &&
863             irq != IRQ_VS_EXT)
864                 return -EINVAL;
865
866         set_bit(irq, vcpu->arch.irqs_pending);
867         smp_mb__before_atomic();
868         set_bit(irq, vcpu->arch.irqs_pending_mask);
869
870         kvm_vcpu_kick(vcpu);
871
872         return 0;
873 }
874
875 int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
876 {
877         /*
878          * We only allow VS-mode software, timer, and external
879          * interrupts when irq is one of the local interrupts
880          * defined by the RISC-V privileged specification.
881          */
882         if (irq < IRQ_LOCAL_MAX &&
883             irq != IRQ_VS_SOFT &&
884             irq != IRQ_VS_TIMER &&
885             irq != IRQ_VS_EXT)
886                 return -EINVAL;
887
888         clear_bit(irq, vcpu->arch.irqs_pending);
889         smp_mb__before_atomic();
890         set_bit(irq, vcpu->arch.irqs_pending_mask);
891
892         return 0;
893 }
894
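/* Check whether any enabled interrupt selected by @mask is pending for this VCPU */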
895 bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
896 {
897         unsigned long ie;
898
899         ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
900                 << VSIP_TO_HVIP_SHIFT) & (unsigned long)mask;
901         ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK &
902                 (unsigned long)mask;
903         if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie)
904                 return true;
905
906         /* Check AIA high interrupts */
907         return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
908 }
909
910 void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
911 {
912         vcpu->arch.power_off = true;
913         kvm_make_request(KVM_REQ_SLEEP, vcpu);
914         kvm_vcpu_kick(vcpu);
915 }
916
917 void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
918 {
919         vcpu->arch.power_off = false;
920         kvm_vcpu_wake_up(vcpu);
921 }
922
923 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
924                                     struct kvm_mp_state *mp_state)
925 {
926         if (vcpu->arch.power_off)
927                 mp_state->mp_state = KVM_MP_STATE_STOPPED;
928         else
929                 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
930
931         return 0;
932 }
933
934 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
935                                     struct kvm_mp_state *mp_state)
936 {
937         int ret = 0;
938
939         switch (mp_state->mp_state) {
940         case KVM_MP_STATE_RUNNABLE:
941                 vcpu->arch.power_off = false;
942                 break;
943         case KVM_MP_STATE_STOPPED:
944                 kvm_riscv_vcpu_power_off(vcpu);
945                 break;
946         default:
947                 ret = -EINVAL;
948         }
949
950         return ret;
951 }
952
953 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
954                                         struct kvm_guest_debug *dbg)
955 {
956         /* TODO: To be implemented later. */
957         return -EINVAL;
958 }
959
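/*
 * Program the henvcfg CSR based on the ISA extensions
 * available to the Guest.
 */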
960 static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
961 {
962         u64 henvcfg = 0;
963
964         if (riscv_isa_extension_available(isa, SVPBMT))
965                 henvcfg |= ENVCFG_PBMTE;
966
967         if (riscv_isa_extension_available(isa, SSTC))
968                 henvcfg |= ENVCFG_STCE;
969
970         if (riscv_isa_extension_available(isa, ZICBOM))
971                 henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);
972
973         if (riscv_isa_extension_available(isa, ZICBOZ))
974                 henvcfg |= ENVCFG_CBZE;
975
976         csr_write(CSR_HENVCFG, henvcfg);
977 #ifdef CONFIG_32BIT
978         csr_write(CSR_HENVCFGH, henvcfg >> 32);
979 #endif
980 }
981
982 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
983 {
984         struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
985
986         csr_write(CSR_VSSTATUS, csr->vsstatus);
987         csr_write(CSR_VSIE, csr->vsie);
988         csr_write(CSR_VSTVEC, csr->vstvec);
989         csr_write(CSR_VSSCRATCH, csr->vsscratch);
990         csr_write(CSR_VSEPC, csr->vsepc);
991         csr_write(CSR_VSCAUSE, csr->vscause);
992         csr_write(CSR_VSTVAL, csr->vstval);
993         csr_write(CSR_HVIP, csr->hvip);
994         csr_write(CSR_VSATP, csr->vsatp);
995
996         kvm_riscv_vcpu_update_config(vcpu->arch.isa);
997
998         kvm_riscv_gstage_update_hgatp(vcpu);
999
1000         kvm_riscv_vcpu_timer_restore(vcpu);
1001
1002         kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
1003         kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
1004                                         vcpu->arch.isa);
1005
1006         kvm_riscv_vcpu_aia_load(vcpu, cpu);
1007
1008         vcpu->cpu = cpu;
1009 }
1010
1011 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1012 {
1013         struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
1014
1015         vcpu->cpu = -1;
1016
1017         kvm_riscv_vcpu_aia_put(vcpu);
1018
1019         kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
1020                                      vcpu->arch.isa);
1021         kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
1022
1023         kvm_riscv_vcpu_timer_save(vcpu);
1024
1025         csr->vsstatus = csr_read(CSR_VSSTATUS);
1026         csr->vsie = csr_read(CSR_VSIE);
1027         csr->vstvec = csr_read(CSR_VSTVEC);
1028         csr->vsscratch = csr_read(CSR_VSSCRATCH);
1029         csr->vsepc = csr_read(CSR_VSEPC);
1030         csr->vscause = csr_read(CSR_VSCAUSE);
1031         csr->vstval = csr_read(CSR_VSTVAL);
1032         csr->hvip = csr_read(CSR_HVIP);
1033         csr->vsatp = csr_read(CSR_VSATP);
1034 }
1035
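/* Process pending VCPU requests (sleep, reset, HGATP update, fences) before guest entry */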
1036 static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
1037 {
1038         struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
1039
1040         if (kvm_request_pending(vcpu)) {
1041                 if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
1042                         kvm_vcpu_srcu_read_unlock(vcpu);
1043                         rcuwait_wait_event(wait,
1044                                 (!vcpu->arch.power_off) && (!vcpu->arch.pause),
1045                                 TASK_INTERRUPTIBLE);
1046                         kvm_vcpu_srcu_read_lock(vcpu);
1047
1048                         if (vcpu->arch.power_off || vcpu->arch.pause) {
1049                                 /*
1050                                  * Awakened to handle a signal; request to
1051                                  * sleep again later.
1052                                  */
1053                                 kvm_make_request(KVM_REQ_SLEEP, vcpu);
1054                         }
1055                 }
1056
1057                 if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
1058                         kvm_riscv_reset_vcpu(vcpu);
1059
1060                 if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
1061                         kvm_riscv_gstage_update_hgatp(vcpu);
1062
1063                 if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
1064                         kvm_riscv_fence_i_process(vcpu);
1065
1066                 /*
1067                  * The generic KVM_REQ_TLB_FLUSH is the same as
1068                  * KVM_REQ_HFENCE_GVMA_VMID_ALL
1069                  */
1070                 if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu))
1071                         kvm_riscv_hfence_gvma_vmid_all_process(vcpu);
1072
1073                 if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
1074                         kvm_riscv_hfence_vvma_all_process(vcpu);
1075
1076                 if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
1077                         kvm_riscv_hfence_process(vcpu);
1078         }
1079 }
1080
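/* Write the shadow HVIP value (and AIA state) to hardware for the current host CPU */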
1081 static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
1082 {
1083         struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
1084
1085         csr_write(CSR_HVIP, csr->hvip);
1086         kvm_riscv_vcpu_aia_update_hvip(vcpu);
1087 }
1088
1089 /*
1090  * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
1091  * the vCPU is running.
1092  *
1093  * This must be noinstr as instrumentation may make use of RCU, and this is not
1094  * safe during the EQS.
1095  */
1096 static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
1097 {
1098         guest_state_enter_irqoff();
1099         __kvm_riscv_switch_to(&vcpu->arch);
1100         vcpu->arch.last_exit_cpu = vcpu->cpu;
1101         guest_state_exit_irqoff();
1102 }
1103
1104 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1105 {
1106         int ret;
1107         struct kvm_cpu_trap trap;
1108         struct kvm_run *run = vcpu->run;
1109
1110         /* Mark that this VCPU has run at least once */
1111         vcpu->arch.ran_atleast_once = true;
1112
1113         kvm_vcpu_srcu_read_lock(vcpu);
1114
1115         switch (run->exit_reason) {
1116         case KVM_EXIT_MMIO:
1117                 /* Process MMIO value returned from user-space */
1118                 ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
1119                 break;
1120         case KVM_EXIT_RISCV_SBI:
1121                 /* Process SBI value returned from user-space */
1122                 ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
1123                 break;
1124         case KVM_EXIT_RISCV_CSR:
1125                 /* Process CSR value returned from user-space */
1126                 ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
1127                 break;
1128         default:
1129                 ret = 0;
1130                 break;
1131         }
1132         if (ret) {
1133                 kvm_vcpu_srcu_read_unlock(vcpu);
1134                 return ret;
1135         }
1136
1137         if (run->immediate_exit) {
1138                 kvm_vcpu_srcu_read_unlock(vcpu);
1139                 return -EINTR;
1140         }
1141
1142         vcpu_load(vcpu);
1143
1144         kvm_sigset_activate(vcpu);
1145
1146         ret = 1;
1147         run->exit_reason = KVM_EXIT_UNKNOWN;
1148         while (ret > 0) {
1149                 /* Check conditions before entering the guest */
1150                 ret = xfer_to_guest_mode_handle_work(vcpu);
1151                 if (ret)
1152                         continue;
1153                 ret = 1;
1154
1155                 kvm_riscv_gstage_vmid_update(vcpu);
1156
1157                 kvm_riscv_check_vcpu_requests(vcpu);
1158
1159                 preempt_disable();
1160
1161                 /* Update AIA HW state before entering guest */
1162                 ret = kvm_riscv_vcpu_aia_update(vcpu);
1163                 if (ret <= 0) {
1164                         preempt_enable();
1165                         continue;
1166                 }
1167
1168                 local_irq_disable();
1169
1170                 /*
1171                  * Ensure we set mode to IN_GUEST_MODE after we disable
1172                  * interrupts and before the final VCPU requests check.
1173                  * See the comment in kvm_vcpu_exiting_guest_mode() and
1174                  * Documentation/virt/kvm/vcpu-requests.rst
1175                  */
1176                 vcpu->mode = IN_GUEST_MODE;
1177
1178                 kvm_vcpu_srcu_read_unlock(vcpu);
1179                 smp_mb__after_srcu_read_unlock();
1180
1181                 /*
1182                  * VCPU interrupts might have been updated asynchronously,
1183                  * so update them in HW.
1184                  */
1185                 kvm_riscv_vcpu_flush_interrupts(vcpu);
1186
1187                 /* Update HVIP CSR for current CPU */
1188                 kvm_riscv_update_hvip(vcpu);
1189
1190                 if (ret <= 0 ||
1191                     kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
1192                     kvm_request_pending(vcpu) ||
1193                     xfer_to_guest_mode_work_pending()) {
1194                         vcpu->mode = OUTSIDE_GUEST_MODE;
1195                         local_irq_enable();
1196                         preempt_enable();
1197                         kvm_vcpu_srcu_read_lock(vcpu);
1198                         continue;
1199                 }
1200
1201                 /*
1202                  * Clean up stale TLB entries
1203                  *
1204                  * Note: This should be done after the G-stage VMID has been
1205                  * updated using kvm_riscv_gstage_vmid_ver_changed()
1206                  */
1207                 kvm_riscv_local_tlb_sanitize(vcpu);
1208
1209                 guest_timing_enter_irqoff();
1210
1211                 kvm_riscv_vcpu_enter_exit(vcpu);
1212
1213                 vcpu->mode = OUTSIDE_GUEST_MODE;
1214                 vcpu->stat.exits++;
1215
1216                 /*
1217                  * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
1218                  * get an interrupt between __kvm_riscv_switch_to() and
1219                  * local_irq_enable() which can potentially change CSRs.
1220                  */
1221                 trap.sepc = vcpu->arch.guest_context.sepc;
1222                 trap.scause = csr_read(CSR_SCAUSE);
1223                 trap.stval = csr_read(CSR_STVAL);
1224                 trap.htval = csr_read(CSR_HTVAL);
1225                 trap.htinst = csr_read(CSR_HTINST);
1226
1227                 /* Sync up interrupt state with HW */
1228                 kvm_riscv_vcpu_sync_interrupts(vcpu);
1229
1230                 /*
1231                  * We must ensure that any pending interrupts are taken before
1232                  * we exit guest timing so that timer ticks are accounted as
1233                  * guest time. Transiently unmask interrupts so that any
1234                  * pending interrupts are taken.
1235                  *
1236                  * There's no barrier which ensures that pending interrupts are
1237                  * recognised, so we just hope that the CPU takes any pending
1238                  * interrupts between the enable and disable.
1239                  */
1240                 local_irq_enable();
1241                 local_irq_disable();
1242
1243                 guest_timing_exit_irqoff();
1244
1245                 local_irq_enable();
1246
1247                 preempt_enable();
1248
1249                 kvm_vcpu_srcu_read_lock(vcpu);
1250
1251                 ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
1252         }
1253
1254         kvm_sigset_deactivate(vcpu);
1255
1256         vcpu_put(vcpu);
1257
1258         kvm_vcpu_srcu_read_unlock(vcpu);
1259
1260         return ret;
1261 }