// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

#ifndef CONFIG_RISCV_SBI_V01
/* Placeholder when SBI v0.1 emulation is compiled out */
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

#ifdef CONFIG_RISCV_PMU_SBI
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu;
#else
/* Placeholder when SBI PMU support is compiled out */
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

struct kvm_riscv_sbi_extension_entry {
	enum KVM_RISCV_SBI_EXT_ID dis_idx;
	const struct kvm_vcpu_sbi_extension *ext_ptr;
};

static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
	{
		.dis_idx = KVM_RISCV_SBI_EXT_V01,
		.ext_ptr = &vcpu_sbi_ext_v01,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
		.ext_ptr = &vcpu_sbi_ext_base,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_TIME,
		.ext_ptr = &vcpu_sbi_ext_time,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_IPI,
		.ext_ptr = &vcpu_sbi_ext_ipi,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_RFENCE,
		.ext_ptr = &vcpu_sbi_ext_rfence,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_SRST,
		.ext_ptr = &vcpu_sbi_ext_srst,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_HSM,
		.ext_ptr = &vcpu_sbi_ext_hsm,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_PMU,
		.ext_ptr = &vcpu_sbi_ext_pmu,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
		.ext_ptr = &vcpu_sbi_ext_experimental,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_VENDOR,
		.ext_ptr = &vcpu_sbi_ext_vendor,
	},
};
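
/*
 * Forward an SBI call to userspace: copy the guest's a0-a5 into the
 * kvm_run SBI exit block (with a7/a6 as extension/function ID) and flag
 * the exit as KVM_EXIT_RISCV_SBI. ret[] is pre-seeded with the current
 * a0/a1 so an unmodified reply leaves the guest registers unchanged.
 */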
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
	run->riscv_sbi.ret[0] = cp->a0;
	run->riscv_sbi.ret[1] = cp->a1;
}
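
/*
 * Power off every vCPU of the VM and hand a system-level event (its type
 * plus one reason code) to userspace via KVM_EXIT_SYSTEM_EVENT.
 */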
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 reason)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->system_event.ndata = 1;
	run->system_event.data[0] = reason;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
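
/*
 * Complete an SBI call that was forwarded to userspace: copy the VMM's
 * reply from kvm_run back into a0/a1 and step sepc past the ecall.
 */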
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Handle SBI return only once */
	if (vcpu->arch.sbi_context.return_handled)
		return 0;
	vcpu->arch.sbi_context.return_handled = 1;

	/* Update return values */
	cp->a0 = run->riscv_sbi.ret[0];
	cp->a1 = run->riscv_sbi.ret[1];

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += 4;

	return 0;
}
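
/*
 * Per-extension enable/disable accessors for the ONE_REG interface. The
 * "disabled" state is tracked per vCPU in sbi_context; entries registered
 * with KVM_RISCV_SBI_EXT_MAX have no control register and stay enabled.
 */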
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long i;
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

	if (reg_num >= KVM_RISCV_SBI_EXT_MAX ||
	    (reg_val != 1 && reg_val != 0))
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].dis_idx == reg_num) {
			sext = &sbi_ext[i];
			break;
		}
	}
	if (!sext)
		return -ENOENT;

	scontext->extension_disabled[sext->dis_idx] = !reg_val;

	return 0;
}

static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long i;
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

	if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].dis_idx == reg_num) {
			sext = &sbi_ext[i];
			break;
		}
	}
	if (!sext)
		return -ENOENT;

	*reg_val = !scontext->extension_disabled[sext->dis_idx];

	return 0;
}
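
/*
 * MULTI registers pack one enable bit per extension: bit i of register
 * reg_num controls extension ID (reg_num * BITS_PER_LONG + i), so a
 * single register access can toggle up to BITS_PER_LONG extensions.
 */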
static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -EINVAL;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -EINVAL;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
	}

	return 0;
}
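
/*
 * Userspace usage sketch (VMM side, not part of this file; assumes RV64,
 * hence KVM_REG_SIZE_U64, and a vcpu_fd obtained via KVM_CREATE_VCPU):
 *
 *	__u64 val = 0;	// 0 disables the extension, 1 enables it
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |
 *		      KVM_RISCV_SBI_EXT_V01,
 *		.addr = (unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 *
 * This must happen before the vCPU first runs; see the ran_atleast_once
 * check below.
 */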
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	/* SBI extensions can only be configured before the vCPU has run */
	if (vcpu->arch.ran_atleast_once)
		return -EBUSY;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_SBI_MULTI_EN:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -EINVAL;
	}
}

int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_SBI_MULTI_EN:
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
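
/*
 * Map an SBI extension ID (from the guest's a7) to its handler. Returns
 * NULL when no registered extension claims the ID or when the matching
 * extension has been disabled by userspace for this vCPU.
 */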
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
				struct kvm_vcpu *vcpu, unsigned long extid)
{
	int i;
	const struct kvm_riscv_sbi_extension_entry *sext;
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		sext = &sbi_ext[i];
		if (sext->ext_ptr->extid_start <= extid &&
		    sext->ext_ptr->extid_end >= extid) {
			if (sext->dis_idx < KVM_RISCV_SBI_EXT_MAX &&
			    scontext->extension_disabled[sext->dis_idx])
				return NULL;

			return sbi_ext[i].ext_ptr;
		}
	}

	return NULL;
}
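
/*
 * Top-level SBI ecall dispatch. The return value follows the run-loop
 * convention: ret == 1 resumes the guest, ret == 0 exits to userspace,
 * and ret < 0 reports an error through the KVM_RUN ioctl.
 */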
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;
	bool next_sepc = true;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	const struct kvm_vcpu_sbi_extension *sbi_ext;
	struct kvm_cpu_trap utrap = {0};
	struct kvm_vcpu_sbi_return sbi_ret = {
		.out_val = 0,
		.err_val = 0,
		.utrap = &utrap,
	};
	bool ext_is_v01 = false;

	sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
	if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
		    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
			ext_is_v01 = true;
#endif
		ret = sbi_ext->handler(vcpu, run, &sbi_ret);
	} else {
		/* Return error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		goto ecall_done;
	}

	/*
	 * When the SBI extension returns a Linux error code, it exits the
	 * ioctl loop and forwards the error to userspace.
	 */
	if (ret < 0) {
		next_sepc = false;
		goto ecall_done;
	}

	/* Handle special error cases, i.e. trap, exit, or userspace forward */
	if (sbi_ret.utrap->scause) {
		/* No need to increment sepc or exit ioctl loop */
		ret = 1;
		sbi_ret.utrap->sepc = cp->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
		next_sepc = false;
		goto ecall_done;
	}

	/* Exit the ioctl loop or propagate the error code to the guest */
	if (sbi_ret.uexit) {
		next_sepc = false;
		ret = 0;
	} else {
		cp->a0 = sbi_ret.err_val;
		ret = 1;
	}

ecall_done:
	if (next_sepc)
		cp->sepc += 4;
	/* a1 should only be updated when we continue the ioctl loop */
	if (!ext_is_v01 && ret == 1)
		cp->a1 = sbi_ret.out_val;

	return ret;
}