// SPDX-License-Identifier: GPL-2.0-only
/*
 * Description: KVM functions specific to running on Book 3S
 * processors as a NESTEDv2 guest.
 */
#include <linux/blk-mq.h>
#include <linux/console.h>
#include <linux/gfp_types.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/pgtable.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/hvcall.h>
#include <asm/pgalloc.h>
#include <asm/plpar_wrappers.h>
#include <asm/guest-state-buffer.h>
struct static_key_false __kvmhv_is_nestedv2 __read_mostly;
EXPORT_SYMBOL_GPL(__kvmhv_is_nestedv2);
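/*
 * The guest state buffer (GSB) "config" message below negotiates the vcpu run
 * input and output buffers with the L0 hypervisor: get_size reports the space
 * needed, fill_info serialises the buffer addresses and sizes, and
 * refresh_info parses the run output minimum size returned by the L0.
 */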
static size_t gs_msg_ops_kvmhv_nestedv2_config_get_size(struct kvmppc_gs_msg *gsm)
{
	u16 ids[] = {
		KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
		KVMPPC_GSID_RUN_INPUT,
		KVMPPC_GSID_RUN_OUTPUT,
	};
	size_t size = 0;

	for (int i = 0; i < ARRAY_SIZE(ids); i++)
		size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
	return size;
}
static int gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb,
						      struct kvmppc_gs_msg *gsm)
{
	struct kvmhv_nestedv2_config *cfg = gsm->data;
	int rc;

	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE)) {
		rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
					cfg->vcpu_run_output_size);
		if (rc < 0)
			return rc;
	}

	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT)) {
		rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT,
					      cfg->vcpu_run_input_cfg);
		if (rc < 0)
			return rc;
	}

	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) {
		rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
					      cfg->vcpu_run_output_cfg);
		if (rc < 0)
			return rc;
	}

	return 0;
}
static int gs_msg_ops_kvmhv_nestedv2_config_refresh_info(struct kvmppc_gs_msg *gsm,
							  struct kvmppc_gs_buff *gsb)
{
	struct kvmhv_nestedv2_config *cfg;
	struct kvmppc_gs_parser gsp = { 0 };
	struct kvmppc_gs_elem *gse;
	int rc;

	cfg = gsm->data;

	rc = kvmppc_gse_parse(&gsp, gsb);
	if (rc < 0)
		return rc;

	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
	if (gse)
		cfg->vcpu_run_output_size = kvmppc_gse_get_u64(gse);
	return 0;
}
static struct kvmppc_gs_msg_ops config_msg_ops = {
	.get_size = gs_msg_ops_kvmhv_nestedv2_config_get_size,
	.fill_info = gs_msg_ops_kvmhv_nestedv2_config_fill_info,
	.refresh_info = gs_msg_ops_kvmhv_nestedv2_config_refresh_info,
};
static size_t gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg *gsm)
{
	struct kvmppc_gs_bitmap gsbm = { 0 };
	size_t size = 0;
	u16 iden;

	kvmppc_gsbm_fill(&gsbm);
	kvmppc_gsbm_for_each(&gsbm, iden)
	{
		switch (iden) {
		case KVMPPC_GSID_HOST_STATE_SIZE:
		case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE:
		case KVMPPC_GSID_PARTITION_TABLE:
		case KVMPPC_GSID_PROCESS_TABLE:
		case KVMPPC_GSID_RUN_INPUT:
		case KVMPPC_GSID_RUN_OUTPUT:
			continue;
		default:
			size += kvmppc_gse_total_size(kvmppc_gsid_size(iden));
		}
	}
	return size;
}
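/*
 * Serialise the vcpu register state selected in the message bitmap into
 * guest state elements. Identities whose guest-wide flag does not match the
 * message flags are skipped so that thread wide and guest wide state are
 * sent with the appropriate message.
 */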
static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
				     struct kvmppc_gs_msg *gsm)
{
	struct kvm_vcpu *vcpu;
	vector128 v;
	int rc, i;
	u16 iden;
	u32 arch_compat = 0;

	vcpu = gsm->data;

	kvmppc_gsm_for_each(gsm, iden)
	{
		rc = 0;

		if ((gsm->flags & KVMPPC_GS_FLAGS_WIDE) !=
		    (kvmppc_gsid_flags(iden) & KVMPPC_GS_FLAGS_WIDE))
			continue;

		switch (iden) {
		case KVMPPC_GSID_DSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dscr);
			break;
		case KVMPPC_GSID_MMCRA:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcra);
			break;
		case KVMPPC_GSID_HFSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hfscr);
			break;
		case KVMPPC_GSID_PURR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.purr);
			break;
		case KVMPPC_GSID_SPURR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.spurr);
			break;
		case KVMPPC_GSID_AMR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.amr);
			break;
		case KVMPPC_GSID_UAMOR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.uamor);
			break;
		case KVMPPC_GSID_SIAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.siar);
			break;
		case KVMPPC_GSID_SDAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sdar);
			break;
		case KVMPPC_GSID_IAMR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.iamr);
			break;
		case KVMPPC_GSID_DAWR0:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr0);
			break;
		case KVMPPC_GSID_DAWR1:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr1);
			break;
		case KVMPPC_GSID_DAWRX0:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx0);
			break;
		case KVMPPC_GSID_DAWRX1:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx1);
			break;
		case KVMPPC_GSID_DEXCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dexcr);
			break;
		case KVMPPC_GSID_HASHKEYR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashkeyr);
			break;
		case KVMPPC_GSID_HASHPKEYR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashpkeyr);
			break;
		case KVMPPC_GSID_CIABR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ciabr);
			break;
		case KVMPPC_GSID_WORT:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.wort);
			break;
		case KVMPPC_GSID_PPR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ppr);
			break;
		case KVMPPC_GSID_PSPB:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pspb);
			break;
		case KVMPPC_GSID_TAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.tar);
			break;
		case KVMPPC_GSID_FSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fscr);
			break;
		case KVMPPC_GSID_EBBHR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbhr);
			break;
		case KVMPPC_GSID_EBBRR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbrr);
			break;
		case KVMPPC_GSID_BESCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.bescr);
			break;
		case KVMPPC_GSID_IC:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ic);
			break;
		case KVMPPC_GSID_CTRL:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ctrl);
			break;
		case KVMPPC_GSID_PIDR:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pid);
			break;
		case KVMPPC_GSID_AMOR: {
			u64 amor = ~0;

			rc = kvmppc_gse_put_u64(gsb, iden, amor);
			break;
		}
		case KVMPPC_GSID_VRSAVE:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.vrsave);
			break;
		case KVMPPC_GSID_MMCR(0) ... KVMPPC_GSID_MMCR(3):
			i = iden - KVMPPC_GSID_MMCR(0);
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcr[i]);
			break;
		case KVMPPC_GSID_SIER(0) ... KVMPPC_GSID_SIER(2):
			i = iden - KVMPPC_GSID_SIER(0);
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sier[i]);
			break;
		case KVMPPC_GSID_PMC(0) ... KVMPPC_GSID_PMC(5):
			i = iden - KVMPPC_GSID_PMC(0);
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pmc[i]);
			break;
		case KVMPPC_GSID_GPR(0) ... KVMPPC_GSID_GPR(31):
			i = iden - KVMPPC_GSID_GPR(0);
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.regs.gpr[i]);
			break;
		case KVMPPC_GSID_CR:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.regs.ccr);
			break;
		case KVMPPC_GSID_XER:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.xer);
			break;
		case KVMPPC_GSID_CTR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.ctr);
			break;
		case KVMPPC_GSID_LR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.regs.link);
			break;
		case KVMPPC_GSID_NIA:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.nip);
			break;
		case KVMPPC_GSID_SRR0:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.srr0);
			break;
		case KVMPPC_GSID_SRR1:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.srr1);
			break;
		case KVMPPC_GSID_SPRG0:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg0);
			break;
		case KVMPPC_GSID_SPRG1:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg1);
			break;
		case KVMPPC_GSID_SPRG2:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg2);
			break;
		case KVMPPC_GSID_SPRG3:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg3);
			break;
		case KVMPPC_GSID_DAR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.dar);
			break;
		case KVMPPC_GSID_DSISR:
			rc = kvmppc_gse_put_u32(gsb, iden,
						vcpu->arch.shregs.dsisr);
			break;
		case KVMPPC_GSID_MSR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.msr);
			break;
		case KVMPPC_GSID_VTB:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->vtb);
			break;
		case KVMPPC_GSID_DPDES:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->dpdes);
			break;
		case KVMPPC_GSID_LPCR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->lpcr);
			break;
		case KVMPPC_GSID_TB_OFFSET:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->tb_offset);
			break;
		case KVMPPC_GSID_FPSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fp.fpscr);
			break;
		case KVMPPC_GSID_VSRS(0) ... KVMPPC_GSID_VSRS(31):
			i = iden - KVMPPC_GSID_VSRS(0);
			memcpy(&v, &vcpu->arch.fp.fpr[i],
			       sizeof(vcpu->arch.fp.fpr[i]));
			rc = kvmppc_gse_put_vector128(gsb, iden, &v);
			break;
		case KVMPPC_GSID_VSCR:
			rc = kvmppc_gse_put_u32(gsb, iden,
						vcpu->arch.vr.vscr.u[3]);
			break;
		case KVMPPC_GSID_VSRS(32) ... KVMPPC_GSID_VSRS(63):
			i = iden - KVMPPC_GSID_VSRS(32);
			rc = kvmppc_gse_put_vector128(gsb, iden,
						      &vcpu->arch.vr.vr[i]);
			break;
		case KVMPPC_GSID_DEC_EXPIRY_TB: {
			u64 dw;

			dw = vcpu->arch.dec_expires -
				vcpu->arch.vcore->tb_offset;
			rc = kvmppc_gse_put_u64(gsb, iden, dw);
			break;
		}
		case KVMPPC_GSID_LOGICAL_PVR:
			/*
			 * Though 'arch_compat == 0' would mean the default
			 * compatibility, arch_compat, being a Guest Wide
			 * Element, cannot be filled with a value of 0 in the
			 * GSB as this would result in a kernel trap.
			 * Hence, when 'arch_compat == 0', arch_compat should
			 * default to L1's PVR.
			 */
			if (!vcpu->arch.vcore->arch_compat) {
				if (cpu_has_feature(CPU_FTR_P11_PVR))
					arch_compat = PVR_ARCH_31_P11;
				else if (cpu_has_feature(CPU_FTR_ARCH_31))
					arch_compat = PVR_ARCH_31;
				else if (cpu_has_feature(CPU_FTR_ARCH_300))
					arch_compat = PVR_ARCH_300;
			} else {
				arch_compat = vcpu->arch.vcore->arch_compat;
			}
			rc = kvmppc_gse_put_u32(gsb, iden, arch_compat);
			break;
		}

		if (rc < 0)
			return rc;
	}

	return 0;
}
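/*
 * The inverse of gs_msg_ops_vcpu_fill_info(): parse the guest state elements
 * returned by the L0 and write them back into the vcpu and vcore structures,
 * marking each identity as valid in the cache.
 */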
static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm,
					struct kvmppc_gs_buff *gsb)
{
	struct kvmppc_gs_parser gsp = { 0 };
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_bitmap *valids;
	struct kvm_vcpu *vcpu;
	struct kvmppc_gs_elem *gse;
	vector128 v;
	int rc, i;
	u16 iden;

	vcpu = gsm->data;

	rc = kvmppc_gse_parse(&gsp, gsb);
	if (rc < 0)
		return rc;

	io = &vcpu->arch.nestedv2_io;
	valids = &io->valids;

	kvmppc_gsp_for_each(&gsp, iden, gse)
	{
		switch (iden) {
		case KVMPPC_GSID_DSCR:
			vcpu->arch.dscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_MMCRA:
			vcpu->arch.mmcra = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HFSCR:
			vcpu->arch.hfscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PURR:
			vcpu->arch.purr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPURR:
			vcpu->arch.spurr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_AMR:
			vcpu->arch.amr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_UAMOR:
			vcpu->arch.uamor = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SIAR:
			vcpu->arch.siar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SDAR:
			vcpu->arch.sdar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_IAMR:
			vcpu->arch.iamr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWR0:
			vcpu->arch.dawr0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWR1:
			vcpu->arch.dawr1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWRX0:
			vcpu->arch.dawrx0 = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_DAWRX1:
			vcpu->arch.dawrx1 = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_DEXCR:
			vcpu->arch.dexcr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HASHKEYR:
			vcpu->arch.hashkeyr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HASHPKEYR:
			vcpu->arch.hashpkeyr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CIABR:
			vcpu->arch.ciabr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_WORT:
			vcpu->arch.wort = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_PPR:
			vcpu->arch.ppr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PSPB:
			vcpu->arch.pspb = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_TAR:
			vcpu->arch.tar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_FSCR:
			vcpu->arch.fscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_EBBHR:
			vcpu->arch.ebbhr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_EBBRR:
			vcpu->arch.ebbrr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_BESCR:
			vcpu->arch.bescr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_IC:
			vcpu->arch.ic = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CTRL:
			vcpu->arch.ctrl = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PIDR:
			vcpu->arch.pid = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_AMOR:
			break;
		case KVMPPC_GSID_VRSAVE:
			vcpu->arch.vrsave = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_MMCR(0) ... KVMPPC_GSID_MMCR(3):
			i = iden - KVMPPC_GSID_MMCR(0);
			vcpu->arch.mmcr[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SIER(0) ... KVMPPC_GSID_SIER(2):
			i = iden - KVMPPC_GSID_SIER(0);
			vcpu->arch.sier[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PMC(0) ... KVMPPC_GSID_PMC(5):
			i = iden - KVMPPC_GSID_PMC(0);
			vcpu->arch.pmc[i] = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_GPR(0) ... KVMPPC_GSID_GPR(31):
			i = iden - KVMPPC_GSID_GPR(0);
			vcpu->arch.regs.gpr[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CR:
			vcpu->arch.regs.ccr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_XER:
			vcpu->arch.regs.xer = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CTR:
			vcpu->arch.regs.ctr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_LR:
			vcpu->arch.regs.link = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_NIA:
			vcpu->arch.regs.nip = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SRR0:
			vcpu->arch.shregs.srr0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SRR1:
			vcpu->arch.shregs.srr1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG0:
			vcpu->arch.shregs.sprg0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG1:
			vcpu->arch.shregs.sprg1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG2:
			vcpu->arch.shregs.sprg2 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG3:
			vcpu->arch.shregs.sprg3 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAR:
			vcpu->arch.shregs.dar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DSISR:
			vcpu->arch.shregs.dsisr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_MSR:
			vcpu->arch.shregs.msr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_VTB:
			vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DPDES:
			vcpu->arch.vcore->dpdes = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_LPCR:
			vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_TB_OFFSET:
			vcpu->arch.vcore->tb_offset = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_FPSCR:
			vcpu->arch.fp.fpscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_VSRS(0) ... KVMPPC_GSID_VSRS(31):
			kvmppc_gse_get_vector128(gse, &v);
			i = iden - KVMPPC_GSID_VSRS(0);
			memcpy(&vcpu->arch.fp.fpr[i], &v,
			       sizeof(vcpu->arch.fp.fpr[i]));
			break;
		case KVMPPC_GSID_VSCR:
			vcpu->arch.vr.vscr.u[3] = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_VSRS(32) ... KVMPPC_GSID_VSRS(63):
			i = iden - KVMPPC_GSID_VSRS(32);
			kvmppc_gse_get_vector128(gse, &vcpu->arch.vr.vr[i]);
			break;
		case KVMPPC_GSID_HDAR:
			vcpu->arch.fault_dar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HDSISR:
			vcpu->arch.fault_dsisr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_ASDR:
			vcpu->arch.fault_gpa = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HEIR:
			vcpu->arch.emul_inst = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DEC_EXPIRY_TB: {
			u64 dw;

			dw = kvmppc_gse_get_u64(gse);
			vcpu->arch.dec_expires =
				dw + vcpu->arch.vcore->tb_offset;
			break;
		}
		case KVMPPC_GSID_LOGICAL_PVR:
			vcpu->arch.vcore->arch_compat = kvmppc_gse_get_u32(gse);
			break;
		default:
			continue;
		}
		kvmppc_gsbm_set(valids, iden);
	}

	return 0;
}
static struct kvmppc_gs_msg_ops vcpu_message_ops = {
	.get_size = gs_msg_ops_vcpu_get_size,
	.fill_info = gs_msg_ops_vcpu_fill_info,
	.refresh_info = gs_msg_ops_vcpu_refresh_info,
};
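/*
 * Negotiate the run vcpu buffers with the L0 hypervisor: query the minimum
 * run output size, allocate and register the run output and run input
 * buffers, and set up the vcpu (thread wide) and vcore (guest wide) messages
 * used when flushing register state.
 */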
static int kvmhv_nestedv2_host_create(struct kvm_vcpu *vcpu,
				      struct kvmhv_nestedv2_io *io)
{
	struct kvmhv_nestedv2_config *cfg;
	struct kvmppc_gs_buff *gsb, *vcpu_run_output, *vcpu_run_input;
	unsigned long guest_id, vcpu_id;
	struct kvmppc_gs_msg *gsm, *vcpu_message, *vcore_message;
	int rc;

	cfg = &io->cfg;
	guest_id = vcpu->kvm->arch.lpid;
	vcpu_id = vcpu->vcpu_id;

	gsm = kvmppc_gsm_new(&config_msg_ops, cfg, KVMPPC_GS_FLAGS_WIDE,
			     GFP_KERNEL);
	if (!gsm) {
		rc = -ENOMEM;
		goto err;
	}

	gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), guest_id, vcpu_id,
			     GFP_KERNEL);
	if (!gsb) {
		rc = -ENOMEM;
		goto free_gsm;
	}

	rc = kvmppc_gsb_receive_datum(gsb, gsm,
				      KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't get vcpu run output buffer minimum size\n");
		goto free_gsb;
	}

	vcpu_run_output = kvmppc_gsb_new(cfg->vcpu_run_output_size, guest_id,
					 vcpu_id, GFP_KERNEL);
	if (!vcpu_run_output) {
		rc = -ENOMEM;
		goto free_gsb;
	}

	cfg->vcpu_run_output_cfg.address = kvmppc_gsb_paddress(vcpu_run_output);
	cfg->vcpu_run_output_cfg.size = kvmppc_gsb_capacity(vcpu_run_output);
	io->vcpu_run_output = vcpu_run_output;

	gsm->flags = 0;
	rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_OUTPUT);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set vcpu run output buffer\n");
		goto free_gs_out;
	}

	vcpu_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu, 0, GFP_KERNEL);
	if (!vcpu_message) {
		rc = -ENOMEM;
		goto free_gs_out;
	}
	kvmppc_gsm_include_all(vcpu_message);

	io->vcpu_message = vcpu_message;

	vcpu_run_input = kvmppc_gsb_new(kvmppc_gsm_size(vcpu_message), guest_id,
					vcpu_id, GFP_KERNEL);
	if (!vcpu_run_input) {
		rc = -ENOMEM;
		goto free_vcpu_message;
	}

	io->vcpu_run_input = vcpu_run_input;
	cfg->vcpu_run_input_cfg.address = kvmppc_gsb_paddress(vcpu_run_input);
	cfg->vcpu_run_input_cfg.size = kvmppc_gsb_capacity(vcpu_run_input);
	rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_INPUT);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set vcpu run input buffer\n");
		goto free_vcpu_run_input;
	}

	vcore_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu,
				       KVMPPC_GS_FLAGS_WIDE, GFP_KERNEL);
	if (!vcore_message) {
		rc = -ENOMEM;
		goto free_vcpu_run_input;
	}

	kvmppc_gsm_include_all(vcore_message);
	kvmppc_gsbm_clear(&vcore_message->bitmap, KVMPPC_GSID_LOGICAL_PVR);
	io->vcore_message = vcore_message;

	kvmppc_gsbm_fill(&io->valids);
	kvmppc_gsm_free(gsm);
	kvmppc_gsb_free(gsb);
	return 0;

free_vcpu_run_input:
	kvmppc_gsb_free(vcpu_run_input);
free_vcpu_message:
	kvmppc_gsm_free(vcpu_message);
free_gs_out:
	kvmppc_gsb_free(vcpu_run_output);
free_gsb:
	kvmppc_gsb_free(gsb);
free_gsm:
	kvmppc_gsm_free(gsm);
err:
	return rc;
}
/**
 * __kvmhv_nestedv2_mark_dirty() - mark a Guest State ID to be sent to the host
 * @vcpu: the vcpu
 * @iden: guest state ID
 *
 * Mark a guest state ID as having been changed by the L1 host and thus
 * the new value must be sent to the L0 hypervisor. See
 * kvmhv_nestedv2_flush_vcpu().
 */
int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_bitmap *valids;
	struct kvmppc_gs_msg *gsm;

	if (!iden)
		return 0;

	io = &vcpu->arch.nestedv2_io;
	valids = &io->valids;
	gsm = io->vcpu_message;
	kvmppc_gsm_include(gsm, iden);
	gsm = io->vcore_message;
	kvmppc_gsm_include(gsm, iden);
	kvmppc_gsbm_set(valids, iden);

	return 0;
}
EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty);
/**
 * __kvmhv_nestedv2_cached_reload() - reload a Guest State ID from the host
 * @vcpu: the vcpu
 * @iden: guest state ID
 *
 * Reload the value for the guest state ID from the L0 host into the L1 host.
 * This is cached so that going out to the L0 host only happens if necessary.
 */
int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_bitmap *valids;
	struct kvmppc_gs_buff *gsb;
	struct kvmppc_gs_msg gsm;
	int rc;

	if (!iden)
		return 0;

	io = &vcpu->arch.nestedv2_io;
	valids = &io->valids;
	if (kvmppc_gsbm_test(valids, iden))
		return 0;

	gsb = io->vcpu_run_input;
	kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, kvmppc_gsid_flags(iden));
	rc = kvmppc_gsb_receive_datum(gsb, &gsm, iden);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't get GSID: 0x%x\n", iden);
		return rc;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_cached_reload);
/**
 * kvmhv_nestedv2_flush_vcpu() - send modified Guest State IDs to the host
 * @vcpu: the vcpu
 * @time_limit: hdec expiry tb
 *
 * Send the values marked by __kvmhv_nestedv2_mark_dirty() to the L0 host.
 * Thread wide values are copied to the H_GUEST_RUN_VCPU input buffer. Guest
 * wide values need to be sent with H_GUEST_SET first.
 *
 * The hdec expiry tb is always sent to the L0 host.
 */
int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_buff *gsb;
	struct kvmppc_gs_msg *gsm;
	int rc;

	io = &vcpu->arch.nestedv2_io;
	gsb = io->vcpu_run_input;
	gsm = io->vcore_message;
	rc = kvmppc_gsb_send_data(gsb, gsm);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set guest wide elements\n");
		return rc;
	}

	gsm = io->vcpu_message;
	kvmppc_gsb_reset(gsb);
	rc = kvmppc_gsm_fill_info(gsm, gsb);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't fill vcpu run input buffer\n");
		return rc;
	}

	rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_HDEC_EXPIRY_TB, time_limit);
	if (rc < 0)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_flush_vcpu);
/**
 * kvmhv_nestedv2_set_ptbl_entry() - send partition and process table state to
 * L0 host
 * @lpid: guest id
 * @dw0: partition table double word
 * @dw1: process table double word
 */
int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1)
{
	struct kvmppc_gs_part_table patbl;
	struct kvmppc_gs_proc_table prtbl;
	struct kvmppc_gs_buff *gsb;
	size_t size;
	int rc;

	size = kvmppc_gse_total_size(
		       kvmppc_gsid_size(KVMPPC_GSID_PARTITION_TABLE)) +
	       kvmppc_gse_total_size(
		       kvmppc_gsid_size(KVMPPC_GSID_PROCESS_TABLE)) +
	       sizeof(struct kvmppc_gs_header);
	gsb = kvmppc_gsb_new(size, lpid, 0, GFP_KERNEL);
	if (!gsb)
		return -ENOMEM;
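	/*
	 * dw0 and dw1 have the same layout as the L1 partition table entry:
	 * dw0 carries the radix tree base, the radix tree size (the RTS1/RTS2
	 * fields encode size - 31) and the root page directory size (RPDS
	 * encodes log2(size) - 3); dw1 carries the process table base and
	 * size (PRTS encodes log2(size) - 12).
	 */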
	patbl.address = dw0 & RPDB_MASK;
	patbl.ea_bits = ((((dw0 & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
			  ((dw0 & RTS2_MASK) >> RTS2_SHIFT)) +
			 31);
	patbl.gpd_size = 1ul << ((dw0 & RPDS_MASK) + 3);
	rc = kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE, patbl);
	if (rc < 0)
		goto free_gsb;

	prtbl.address = dw1 & PRTB_MASK;
	prtbl.gpd_size = 1ul << ((dw1 & PRTS_MASK) + 12);
	rc = kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE, prtbl);
	if (rc < 0)
		goto free_gsb;

	rc = kvmppc_gsb_send(gsb, KVMPPC_GS_FLAGS_WIDE);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set the PATE\n");
		goto free_gsb;
	}

	kvmppc_gsb_free(gsb);
	return 0;

free_gsb:
	kvmppc_gsb_free(gsb);
	return rc;
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_ptbl_entry);
/**
 * kvmhv_nestedv2_set_vpa() - register L2 VPA with L0
 * @vcpu: the vcpu
 * @vpa: L1 logical real address
 */
int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_buff *gsb;
	int rc = 0;

	io = &vcpu->arch.nestedv2_io;
	gsb = io->vcpu_run_input;

	kvmppc_gsb_reset(gsb);
	rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_VPA, vpa);
	if (rc < 0)
		goto out;

	rc = kvmppc_gsb_send(gsb, 0);
	if (rc < 0)
		pr_err("KVM-NESTEDv2: couldn't register the L2 VPA (rc=%d)\n", rc);

out:
	kvmppc_gsb_reset(gsb);
	return rc;
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_vpa);
/**
 * kvmhv_nestedv2_parse_output() - receive values from H_GUEST_RUN_VCPU output
 * @vcpu: the vcpu
 *
 * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu.
 */
int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_buff *gsb;
	struct kvmppc_gs_msg gsm;

	io = &vcpu->arch.nestedv2_io;
	gsb = io->vcpu_run_output;

	vcpu->arch.fault_dar = 0;
	vcpu->arch.fault_dsisr = 0;
	vcpu->arch.fault_gpa = 0;
	vcpu->arch.emul_inst = KVM_INST_FETCH_FAILED;

	kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
	return kvmppc_gsm_refresh_info(&gsm, gsb);
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_parse_output);
static void kvmhv_nestedv2_host_free(struct kvm_vcpu *vcpu,
				     struct kvmhv_nestedv2_io *io)
{
	kvmppc_gsm_free(io->vcpu_message);
	kvmppc_gsm_free(io->vcore_message);
	kvmppc_gsb_free(io->vcpu_run_input);
	kvmppc_gsb_free(io->vcpu_run_output);
}
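/*
 * Reload the pt_regs related state (GPRs, CR, XER, CTR, LR, NIA) from the L0
 * for any identity that is not already valid in the cache.
 */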
int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_bitmap *valids;
	struct kvmppc_gs_buff *gsb;
	struct kvmppc_gs_msg gsm;
	int rc = 0;

	io = &vcpu->arch.nestedv2_io;
	valids = &io->valids;

	gsb = io->vcpu_run_input;
	kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);

	for (int i = 0; i < 32; i++) {
		if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_GPR(i)))
			kvmppc_gsm_include(&gsm, KVMPPC_GSID_GPR(i));
	}

	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CR))
		kvmppc_gsm_include(&gsm, KVMPPC_GSID_CR);

	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_XER))
		kvmppc_gsm_include(&gsm, KVMPPC_GSID_XER);

	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CTR))
		kvmppc_gsm_include(&gsm, KVMPPC_GSID_CTR);

	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_LR))
		kvmppc_gsm_include(&gsm, KVMPPC_GSID_LR);

	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_NIA))
		kvmppc_gsm_include(&gsm, KVMPPC_GSID_NIA);

	rc = kvmppc_gsb_receive_data(gsb, &gsm);
	if (rc < 0)
		pr_err("KVM-NESTEDv2: couldn't reload ptregs\n");

	return rc;
}
EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_reload_ptregs);
int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
				       struct pt_regs *regs)
{
	for (int i = 0; i < 32; i++)
		kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(i));

	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);

	return 0;
}
EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty_ptregs);
/**
 * kvmhv_nestedv2_vcpu_create() - create nested vcpu for the NESTEDv2 API
 * @vcpu: the vcpu
 * @io: NESTEDv2 nested io state
 *
 * Create a nested vcpu with the L0 hypervisor and set up the NESTEDv2 io
 * state needed to run it.
 */
int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu,
			       struct kvmhv_nestedv2_io *io)
{
	long rc;

	rc = plpar_guest_create_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id);

	if (rc != H_SUCCESS) {
		pr_err("KVM: Create Guest vcpu hcall failed, rc=%ld\n", rc);
		switch (rc) {
		case H_NOT_ENOUGH_RESOURCES:
		case H_ABORTED:
			return -ENOMEM;
		case H_AUTHORITY:
			return -EPERM;
		default:
			return -EINVAL;
		}
	}

	rc = kvmhv_nestedv2_host_create(vcpu, io);

	return rc;
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_create);
/**
 * kvmhv_nestedv2_vcpu_free() - free the NESTEDv2 state
 * @vcpu: the vcpu
 * @io: NESTEDv2 nested io state
 */
void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu,
			      struct kvmhv_nestedv2_io *io)
{
	kvmhv_nestedv2_host_free(vcpu, io);
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_free);