// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

#ifdef CONFIG_RISCV_PMU_SBI
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu;
#else
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

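/*
 * Map each SBI extension implementation to the KVM_RISCV_SBI_EXT_*
 * index that userspace uses to enable or disable it. Entries with
 * dis_idx == KVM_RISCV_SBI_EXT_MAX cannot be disabled.
 */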
struct kvm_riscv_sbi_extension_entry {
	enum KVM_RISCV_SBI_EXT_ID dis_idx;
	const struct kvm_vcpu_sbi_extension *ext_ptr;
};

static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
	{
		.dis_idx = KVM_RISCV_SBI_EXT_V01,
		.ext_ptr = &vcpu_sbi_ext_v01,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
		.ext_ptr = &vcpu_sbi_ext_base,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_TIME,
		.ext_ptr = &vcpu_sbi_ext_time,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_IPI,
		.ext_ptr = &vcpu_sbi_ext_ipi,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_RFENCE,
		.ext_ptr = &vcpu_sbi_ext_rfence,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_SRST,
		.ext_ptr = &vcpu_sbi_ext_srst,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_HSM,
		.ext_ptr = &vcpu_sbi_ext_hsm,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_PMU,
		.ext_ptr = &vcpu_sbi_ext_pmu,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
		.ext_ptr = &vcpu_sbi_ext_experimental,
	},
	{
		.dis_idx = KVM_RISCV_SBI_EXT_VENDOR,
		.ext_ptr = &vcpu_sbi_ext_vendor,
	},
};

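/*
 * Forward an SBI call made by the guest to userspace for emulation.
 * ret[] is pre-seeded with the current a0/a1 so the guest registers
 * are left unchanged if userspace does not overwrite them.
 */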
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
	run->riscv_sbi.ret[0] = cp->a0;
	run->riscv_sbi.ret[1] = cp->a1;
}

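/*
 * Power off all VCPUs of the VM and exit to userspace with a
 * KVM_EXIT_SYSTEM_EVENT of the given type, passing @reason as the
 * single data word.
 */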
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 reason)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->system_event.ndata = 1;
	run->system_event.data[0] = reason;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

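/*
 * Complete an SBI call that was forwarded to userspace: copy the
 * return values back into a0/a1 and step sepc past the ECALL.
 * Guarded so the update is applied at most once per forwarded call.
 */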
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Handle SBI return only once */
	if (vcpu->arch.sbi_context.return_handled)
		return 0;
	vcpu->arch.sbi_context.return_handled = 1;

	/* Update return values */
	cp->a0 = run->riscv_sbi.ret[0];
	cp->a1 = run->riscv_sbi.ret[1];

	/* Move to next instruction */
	cp->sepc += 4;

	return 0;
}

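/*
 * Enable (reg_val == 1) or disable (reg_val == 0) a single SBI
 * extension, identified by its KVM_RISCV_SBI_EXT_* index.
 */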
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long i;
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

	if (reg_num >= KVM_RISCV_SBI_EXT_MAX ||
	    (reg_val != 1 && reg_val != 0))
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].dis_idx == reg_num) {
			sext = &sbi_ext[i];
			break;
		}
	}
	if (!sext)
		return -ENOENT;

	scontext->extension_disabled[sext->dis_idx] = !reg_val;

	return 0;
}

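/* Report whether a single SBI extension is currently enabled. */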
static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long i;
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

	if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].dis_idx == reg_num) {
			sext = &sbi_ext[i];
			break;
		}
	}
	if (!sext)
		return -ENOENT;

	*reg_val = !scontext->extension_disabled[sext->dis_idx];

	return 0;
}

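/*
 * Apply one word of the MULTI_EN/MULTI_DIS bitmap: each set bit in
 * reg_val selects the extension with index
 * (bit + reg_num * BITS_PER_LONG), which is then enabled or disabled
 * according to @enable.
 */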
static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -EINVAL;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}

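/*
 * Gather one word of the extension-enabled bitmap; bit i reports the
 * state of the extension with index (i + reg_num * BITS_PER_LONG).
 */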
static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -EINVAL;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
	}

	return 0;
}

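/*
 * Handle KVM_SET_ONE_REG for the SBI extension pseudo-registers.
 * Changes are only permitted before the VCPU has run. As an
 * illustrative sketch of the encoding (assuming RV64, where the
 * register size field is KVM_REG_SIZE_U64), userspace could disable
 * the PMU extension by writing 0 to the register id
 * KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_SBI_EXT |
 * KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU.
 */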
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (vcpu->arch.ran_atleast_once)
		return -EBUSY;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_SBI_MULTI_EN:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -EINVAL;
	}

	return 0;
}

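/*
 * Handle KVM_GET_ONE_REG for the SBI extension pseudo-registers. For
 * MULTI_DIS the enabled-bitmap is inverted, so set bits report
 * disabled extensions.
 */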
int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_SBI_MULTI_EN:
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

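/*
 * Look up the extension implementing @extid, returning NULL when no
 * extension matches or when the matching extension has been disabled
 * for this VCPU.
 */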
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
				struct kvm_vcpu *vcpu, unsigned long extid)
{
	int i;
	const struct kvm_riscv_sbi_extension_entry *sext;
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		sext = &sbi_ext[i];
		if (sext->ext_ptr->extid_start <= extid &&
		    sext->ext_ptr->extid_end >= extid) {
			if (sext->dis_idx < KVM_RISCV_SBI_EXT_MAX &&
			    scontext->extension_disabled[sext->dis_idx])
				return NULL;
			return sbi_ext[i].ext_ptr;
		}
	}

	return NULL;
}

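/*
 * Top-level SBI ECALL handler: dispatch on the extension id in a7,
 * then either redirect a trap back to the guest, exit to userspace
 * (ret <= 0), or write the results to a0/a1 and advance sepc past
 * the ECALL (ret == 1 continues the ioctl loop).
 */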
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;
	bool next_sepc = true;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	const struct kvm_vcpu_sbi_extension *sbi_ext;
	struct kvm_cpu_trap utrap = {0};
	struct kvm_vcpu_sbi_return sbi_ret = {
		.out_val = 0,
		.err_val = 0,
		.utrap = &utrap,
	};
	bool ext_is_v01 = false;

	sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
	if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
		    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
			ext_is_v01 = true;
#endif
		ret = sbi_ext->handler(vcpu, run, &sbi_ret);
	} else {
		/* Return error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		goto ecall_done;
	}

	/*
	 * When the SBI extension handler returns a Linux error code, exit
	 * the ioctl loop and forward the error to userspace.
	 */
	if (ret < 0) {
		next_sepc = false;
		goto ecall_done;
	}

	/* Handle special error cases, i.e. trap, exit, or userspace forward */
	if (sbi_ret.utrap->scause) {
		/* No need to increment sepc or exit ioctl loop */
		ret = 1;
		sbi_ret.utrap->sepc = cp->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
		next_sepc = false;
		goto ecall_done;
	}

	/* Exit the ioctl loop or propagate the error code to the guest */
	if (sbi_ret.uexit) {
		next_sepc = false;
		ret = 0;
	} else {
		cp->a0 = sbi_ret.err_val;
		ret = 1;
	}
ecall_done:
	if (next_sepc)
		cp->sepc += 4;
	/* a1 should only be updated when we continue the ioctl loop */
	if (!ext_is_v01 && ret == 1)
		cp->a1 = sbi_ret.out_val;

	return ret;
}