1 // SPDX-License-Identifier: GPL-2.0-only
8 * Description: KVM functions specific to running on Book 3S
9 * processors as a NESTEDv2 guest.
13 #include "linux/blk-mq.h"
14 #include "linux/console.h"
15 #include "linux/gfp_types.h"
16 #include "linux/signal.h"
17 #include <linux/kernel.h>
18 #include <linux/kvm_host.h>
19 #include <linux/pgtable.h>
21 #include <asm/kvm_ppc.h>
22 #include <asm/kvm_book3s.h>
23 #include <asm/hvcall.h>
24 #include <asm/pgalloc.h>
26 #include <asm/plpar_wrappers.h>
27 #include <asm/guest-state-buffer.h>
/*
 * Static branch key: enabled when KVM is running as a NESTEDv2 (PAPR
 * guest API v2) guest.  NOTE(review): the site that enables it is not
 * visible in this chunk.
 */
30 struct static_key_false __kvmhv_is_nestedv2 __read_mostly;
31 EXPORT_SYMBOL_GPL(__kvmhv_is_nestedv2);
/*
 * Size of the guest-state buffer needed for the nestedv2 configuration
 * message: the run-output minimum size plus the run input/output buffer
 * descriptors listed below.
 */
35 gs_msg_ops_kvmhv_nestedv2_config_get_size(struct kvmppc_gs_msg *gsm)
38 KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
39 KVMPPC_GSID_RUN_INPUT,
40 KVMPPC_GSID_RUN_OUTPUT,
/* Sum the encoded (header + payload) size of each config element. */
45 for (int i = 0; i < ARRAY_SIZE(ids); i++)
46 size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
/*
 * Serialize the nestedv2 configuration (struct kvmhv_nestedv2_config)
 * into a guest-state buffer, emitting only the elements the message
 * includes.
 */
51 gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb,
52 struct kvmppc_gs_msg *gsm)
54 struct kvmhv_nestedv2_config *cfg;
59 if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE)) {
60 rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
61 cfg->vcpu_run_output_size);
66 if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT)) {
67 rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT,
68 cfg->vcpu_run_input_cfg);
73 if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) {
/*
 * NOTE(review): unlike the two cases above, this call's return value is
 * not captured into rc here — confirm against the full file whether the
 * error is checked on an elided line.
 */
74 kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
75 cfg->vcpu_run_output_cfg);
/*
 * Parse a guest-state buffer returned by the hypervisor and refresh the
 * local configuration; only RUN_OUTPUT_MIN_SIZE is read back here.
 */
84 gs_msg_ops_kvmhv_nestedv2_config_refresh_info(struct kvmppc_gs_msg *gsm,
85 struct kvmppc_gs_buff *gsb)
87 struct kvmhv_nestedv2_config *cfg;
88 struct kvmppc_gs_parser gsp = { 0 };
89 struct kvmppc_gs_elem *gse;
94 rc = kvmppc_gse_parse(&gsp, gsb);
/* Pull the L0-advertised minimum run-output buffer size, if present. */
98 gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
100 cfg->vcpu_run_output_size = kvmppc_gse_get_u64(gse);
/* Message ops for the per-guest nestedv2 configuration message. */
104 static struct kvmppc_gs_msg_ops config_msg_ops = {
105 .get_size = gs_msg_ops_kvmhv_nestedv2_config_get_size,
106 .fill_info = gs_msg_ops_kvmhv_nestedv2_config_fill_info,
107 .refresh_info = gs_msg_ops_kvmhv_nestedv2_config_refresh_info,
/*
 * Worst-case buffer size for a vcpu guest-state message: every known
 * guest-state ID except the config/meta IDs skipped in the switch.
 */
110 static size_t gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg *gsm)
112 struct kvmppc_gs_bitmap gsbm = { 0 };
/* Mark every ID, then total the encoded size of each one kept. */
116 kvmppc_gsbm_fill(&gsbm);
117 kvmppc_gsbm_for_each(&gsbm, iden)
/* These IDs travel via the config message / dedicated hcalls, not the
 * vcpu message, so they don't contribute to its size. */
120 case KVMPPC_GSID_HOST_STATE_SIZE:
121 case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE:
122 case KVMPPC_GSID_PARTITION_TABLE:
123 case KVMPPC_GSID_PROCESS_TABLE:
124 case KVMPPC_GSID_RUN_INPUT:
125 case KVMPPC_GSID_RUN_OUTPUT:
128 size += kvmppc_gse_total_size(kvmppc_gsid_size(iden));
/*
 * Serialize vcpu register state into a guest-state buffer, one element
 * per ID included in the message.  Guest-wide (WIDE) IDs are emitted
 * only when the message itself is wide, and vice versa.
 */
134 static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
135 struct kvmppc_gs_msg *gsm)
137 struct kvm_vcpu *vcpu;
144 kvmppc_gsm_for_each(gsm, iden)
/* Skip IDs whose thread/guest-wide scope doesn't match this message. */
148 if ((gsm->flags & KVMPPC_GS_FLAGS_WIDE) !=
149 (kvmppc_gsid_flags(iden) & KVMPPC_GS_FLAGS_WIDE))
153 case KVMPPC_GSID_DSCR:
154 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dscr);
156 case KVMPPC_GSID_MMCRA:
157 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcra);
159 case KVMPPC_GSID_HFSCR:
160 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hfscr);
162 case KVMPPC_GSID_PURR:
163 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.purr);
165 case KVMPPC_GSID_SPURR:
166 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.spurr);
168 case KVMPPC_GSID_AMR:
169 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.amr);
171 case KVMPPC_GSID_UAMOR:
172 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.uamor);
174 case KVMPPC_GSID_SIAR:
175 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.siar);
177 case KVMPPC_GSID_SDAR:
178 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sdar);
180 case KVMPPC_GSID_IAMR:
181 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.iamr);
183 case KVMPPC_GSID_DAWR0:
184 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr0);
186 case KVMPPC_GSID_DAWR1:
187 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr1);
189 case KVMPPC_GSID_DAWRX0:
190 rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx0);
192 case KVMPPC_GSID_DAWRX1:
193 rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx1);
195 case KVMPPC_GSID_CIABR:
196 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ciabr);
198 case KVMPPC_GSID_WORT:
199 rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.wort);
201 case KVMPPC_GSID_PPR:
202 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ppr);
204 case KVMPPC_GSID_PSPB:
205 rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pspb);
207 case KVMPPC_GSID_TAR:
208 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.tar);
210 case KVMPPC_GSID_FSCR:
211 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fscr);
213 case KVMPPC_GSID_EBBHR:
214 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbhr);
216 case KVMPPC_GSID_EBBRR:
217 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbrr);
219 case KVMPPC_GSID_BESCR:
220 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.bescr);
223 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ic);
225 case KVMPPC_GSID_CTRL:
226 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ctrl);
228 case KVMPPC_GSID_PIDR:
229 rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pid);
/* NOTE(review): the local 'amor' is assigned on a line not visible in
 * this chunk — confirm its value against the full file. */
231 case KVMPPC_GSID_AMOR: {
234 rc = kvmppc_gse_put_u64(gsb, iden, amor);
237 case KVMPPC_GSID_VRSAVE:
238 rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.vrsave);
/* Register-range IDs: index into the matching vcpu array. */
240 case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
241 i = iden - KVMPPC_GSID_MMCR(0);
242 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcr[i]);
244 case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
245 i = iden - KVMPPC_GSID_SIER(0);
246 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sier[i]);
248 case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
249 i = iden - KVMPPC_GSID_PMC(0);
250 rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pmc[i]);
252 case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
253 i = iden - KVMPPC_GSID_GPR(0);
254 rc = kvmppc_gse_put_u64(gsb, iden,
255 vcpu->arch.regs.gpr[i]);
258 rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.regs.ccr);
260 case KVMPPC_GSID_XER:
261 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.xer);
263 case KVMPPC_GSID_CTR:
264 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.ctr);
267 rc = kvmppc_gse_put_u64(gsb, iden,
268 vcpu->arch.regs.link);
270 case KVMPPC_GSID_NIA:
271 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.nip);
273 case KVMPPC_GSID_SRR0:
274 rc = kvmppc_gse_put_u64(gsb, iden,
275 vcpu->arch.shregs.srr0);
277 case KVMPPC_GSID_SRR1:
278 rc = kvmppc_gse_put_u64(gsb, iden,
279 vcpu->arch.shregs.srr1);
281 case KVMPPC_GSID_SPRG0:
282 rc = kvmppc_gse_put_u64(gsb, iden,
283 vcpu->arch.shregs.sprg0);
285 case KVMPPC_GSID_SPRG1:
286 rc = kvmppc_gse_put_u64(gsb, iden,
287 vcpu->arch.shregs.sprg1);
289 case KVMPPC_GSID_SPRG2:
290 rc = kvmppc_gse_put_u64(gsb, iden,
291 vcpu->arch.shregs.sprg2);
293 case KVMPPC_GSID_SPRG3:
294 rc = kvmppc_gse_put_u64(gsb, iden,
295 vcpu->arch.shregs.sprg3);
297 case KVMPPC_GSID_DAR:
298 rc = kvmppc_gse_put_u64(gsb, iden,
299 vcpu->arch.shregs.dar);
301 case KVMPPC_GSID_DSISR:
302 rc = kvmppc_gse_put_u32(gsb, iden,
303 vcpu->arch.shregs.dsisr);
305 case KVMPPC_GSID_MSR:
306 rc = kvmppc_gse_put_u64(gsb, iden,
307 vcpu->arch.shregs.msr);
/* vcore-backed (guest-wide) values. */
309 case KVMPPC_GSID_VTB:
310 rc = kvmppc_gse_put_u64(gsb, iden,
311 vcpu->arch.vcore->vtb);
313 case KVMPPC_GSID_LPCR:
314 rc = kvmppc_gse_put_u64(gsb, iden,
315 vcpu->arch.vcore->lpcr);
317 case KVMPPC_GSID_TB_OFFSET:
318 rc = kvmppc_gse_put_u64(gsb, iden,
319 vcpu->arch.vcore->tb_offset);
321 case KVMPPC_GSID_FPSCR:
322 rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fp.fpscr);
/* VSRs 0-31 are backed by the FP array: copy through a temporary
 * vector128 before emitting. */
324 case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
325 i = iden - KVMPPC_GSID_VSRS(0);
326 memcpy(&v, &vcpu->arch.fp.fpr[i],
327 sizeof(vcpu->arch.fp.fpr[i]));
328 rc = kvmppc_gse_put_vector128(gsb, iden, &v);
331 case KVMPPC_GSID_VSCR:
332 rc = kvmppc_gse_put_u32(gsb, iden,
333 vcpu->arch.vr.vscr.u[3]);
335 case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
336 i = iden - KVMPPC_GSID_VSRS(32);
337 rc = kvmppc_gse_put_vector128(gsb, iden,
338 &vcpu->arch.vr.vr[i]);
/* Decrementer expiry is stored guest-relative; convert to the L0
 * timebase by removing the vcore tb_offset before sending. */
341 case KVMPPC_GSID_DEC_EXPIRY_TB: {
344 dw = vcpu->arch.dec_expires -
345 vcpu->arch.vcore->tb_offset;
346 rc = kvmppc_gse_put_u64(gsb, iden, dw);
349 case KVMPPC_GSID_LOGICAL_PVR:
350 rc = kvmppc_gse_put_u32(gsb, iden,
351 vcpu->arch.vcore->arch_compat);
/*
 * Parse a guest-state buffer returned by the L0 hypervisor and scatter
 * each element's value back into the vcpu/vcore state.  Every ID that
 * was parsed is marked valid in the io->valids cache bitmap at the
 * bottom of the loop.
 */
362 static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm,
363 struct kvmppc_gs_buff *gsb)
365 struct kvmppc_gs_parser gsp = { 0 };
366 struct kvmhv_nestedv2_io *io;
367 struct kvmppc_gs_bitmap *valids;
368 struct kvm_vcpu *vcpu;
369 struct kvmppc_gs_elem *gse;
376 rc = kvmppc_gse_parse(&gsp, gsb);
380 io = &vcpu->arch.nestedv2_io;
381 valids = &io->valids;
383 kvmppc_gsp_for_each(&gsp, iden, gse)
386 case KVMPPC_GSID_DSCR:
387 vcpu->arch.dscr = kvmppc_gse_get_u64(gse);
389 case KVMPPC_GSID_MMCRA:
390 vcpu->arch.mmcra = kvmppc_gse_get_u64(gse);
392 case KVMPPC_GSID_HFSCR:
393 vcpu->arch.hfscr = kvmppc_gse_get_u64(gse);
395 case KVMPPC_GSID_PURR:
396 vcpu->arch.purr = kvmppc_gse_get_u64(gse);
398 case KVMPPC_GSID_SPURR:
399 vcpu->arch.spurr = kvmppc_gse_get_u64(gse);
401 case KVMPPC_GSID_AMR:
402 vcpu->arch.amr = kvmppc_gse_get_u64(gse);
404 case KVMPPC_GSID_UAMOR:
405 vcpu->arch.uamor = kvmppc_gse_get_u64(gse);
407 case KVMPPC_GSID_SIAR:
408 vcpu->arch.siar = kvmppc_gse_get_u64(gse);
410 case KVMPPC_GSID_SDAR:
411 vcpu->arch.sdar = kvmppc_gse_get_u64(gse);
413 case KVMPPC_GSID_IAMR:
414 vcpu->arch.iamr = kvmppc_gse_get_u64(gse);
416 case KVMPPC_GSID_DAWR0:
417 vcpu->arch.dawr0 = kvmppc_gse_get_u64(gse);
419 case KVMPPC_GSID_DAWR1:
420 vcpu->arch.dawr1 = kvmppc_gse_get_u64(gse);
422 case KVMPPC_GSID_DAWRX0:
423 vcpu->arch.dawrx0 = kvmppc_gse_get_u32(gse);
425 case KVMPPC_GSID_DAWRX1:
426 vcpu->arch.dawrx1 = kvmppc_gse_get_u32(gse);
428 case KVMPPC_GSID_CIABR:
429 vcpu->arch.ciabr = kvmppc_gse_get_u64(gse);
431 case KVMPPC_GSID_WORT:
432 vcpu->arch.wort = kvmppc_gse_get_u32(gse);
434 case KVMPPC_GSID_PPR:
435 vcpu->arch.ppr = kvmppc_gse_get_u64(gse);
437 case KVMPPC_GSID_PSPB:
438 vcpu->arch.pspb = kvmppc_gse_get_u32(gse);
440 case KVMPPC_GSID_TAR:
441 vcpu->arch.tar = kvmppc_gse_get_u64(gse);
443 case KVMPPC_GSID_FSCR:
444 vcpu->arch.fscr = kvmppc_gse_get_u64(gse);
446 case KVMPPC_GSID_EBBHR:
447 vcpu->arch.ebbhr = kvmppc_gse_get_u64(gse);
449 case KVMPPC_GSID_EBBRR:
450 vcpu->arch.ebbrr = kvmppc_gse_get_u64(gse);
452 case KVMPPC_GSID_BESCR:
453 vcpu->arch.bescr = kvmppc_gse_get_u64(gse);
456 vcpu->arch.ic = kvmppc_gse_get_u64(gse);
458 case KVMPPC_GSID_CTRL:
459 vcpu->arch.ctrl = kvmppc_gse_get_u64(gse);
461 case KVMPPC_GSID_PIDR:
462 vcpu->arch.pid = kvmppc_gse_get_u32(gse);
/* No vcpu state is updated for AMOR on the lines visible here. */
464 case KVMPPC_GSID_AMOR:
466 case KVMPPC_GSID_VRSAVE:
467 vcpu->arch.vrsave = kvmppc_gse_get_u32(gse);
/* Register-range IDs: index into the matching vcpu array. */
469 case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
470 i = iden - KVMPPC_GSID_MMCR(0);
471 vcpu->arch.mmcr[i] = kvmppc_gse_get_u64(gse);
473 case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
474 i = iden - KVMPPC_GSID_SIER(0);
475 vcpu->arch.sier[i] = kvmppc_gse_get_u64(gse);
477 case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
478 i = iden - KVMPPC_GSID_PMC(0);
479 vcpu->arch.pmc[i] = kvmppc_gse_get_u32(gse);
481 case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
482 i = iden - KVMPPC_GSID_GPR(0);
483 vcpu->arch.regs.gpr[i] = kvmppc_gse_get_u64(gse);
486 vcpu->arch.regs.ccr = kvmppc_gse_get_u32(gse);
488 case KVMPPC_GSID_XER:
489 vcpu->arch.regs.xer = kvmppc_gse_get_u64(gse);
491 case KVMPPC_GSID_CTR:
492 vcpu->arch.regs.ctr = kvmppc_gse_get_u64(gse);
495 vcpu->arch.regs.link = kvmppc_gse_get_u64(gse);
497 case KVMPPC_GSID_NIA:
498 vcpu->arch.regs.nip = kvmppc_gse_get_u64(gse);
500 case KVMPPC_GSID_SRR0:
501 vcpu->arch.shregs.srr0 = kvmppc_gse_get_u64(gse);
503 case KVMPPC_GSID_SRR1:
504 vcpu->arch.shregs.srr1 = kvmppc_gse_get_u64(gse);
506 case KVMPPC_GSID_SPRG0:
507 vcpu->arch.shregs.sprg0 = kvmppc_gse_get_u64(gse);
509 case KVMPPC_GSID_SPRG1:
510 vcpu->arch.shregs.sprg1 = kvmppc_gse_get_u64(gse);
512 case KVMPPC_GSID_SPRG2:
513 vcpu->arch.shregs.sprg2 = kvmppc_gse_get_u64(gse);
515 case KVMPPC_GSID_SPRG3:
516 vcpu->arch.shregs.sprg3 = kvmppc_gse_get_u64(gse);
518 case KVMPPC_GSID_DAR:
519 vcpu->arch.shregs.dar = kvmppc_gse_get_u64(gse);
521 case KVMPPC_GSID_DSISR:
522 vcpu->arch.shregs.dsisr = kvmppc_gse_get_u32(gse);
524 case KVMPPC_GSID_MSR:
525 vcpu->arch.shregs.msr = kvmppc_gse_get_u64(gse);
527 case KVMPPC_GSID_VTB:
528 vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse);
530 case KVMPPC_GSID_LPCR:
531 vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse);
533 case KVMPPC_GSID_TB_OFFSET:
534 vcpu->arch.vcore->tb_offset = kvmppc_gse_get_u64(gse);
536 case KVMPPC_GSID_FPSCR:
537 vcpu->arch.fp.fpscr = kvmppc_gse_get_u64(gse);
/* VSRs 0-31 land in the FP array: copy out of the decoded vector128. */
539 case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
540 kvmppc_gse_get_vector128(gse, &v);
541 i = iden - KVMPPC_GSID_VSRS(0);
542 memcpy(&vcpu->arch.fp.fpr[i], &v,
543 sizeof(vcpu->arch.fp.fpr[i]));
546 case KVMPPC_GSID_VSCR:
547 vcpu->arch.vr.vscr.u[3] = kvmppc_gse_get_u32(gse);
549 case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
550 i = iden - KVMPPC_GSID_VSRS(32);
551 kvmppc_gse_get_vector128(gse, &vcpu->arch.vr.vr[i]);
/* Hypervisor interrupt/fault information from the run output. */
554 case KVMPPC_GSID_HDAR:
555 vcpu->arch.fault_dar = kvmppc_gse_get_u64(gse);
557 case KVMPPC_GSID_HDSISR:
558 vcpu->arch.fault_dsisr = kvmppc_gse_get_u32(gse);
560 case KVMPPC_GSID_ASDR:
561 vcpu->arch.fault_gpa = kvmppc_gse_get_u64(gse);
563 case KVMPPC_GSID_HEIR:
564 vcpu->arch.emul_inst = kvmppc_gse_get_u64(gse);
/* Decrementer expiry arrives in the L0 timebase; convert back to the
 * guest timebase by re-adding the vcore tb_offset. */
566 case KVMPPC_GSID_DEC_EXPIRY_TB: {
569 dw = kvmppc_gse_get_u64(gse);
570 vcpu->arch.dec_expires =
571 dw + vcpu->arch.vcore->tb_offset;
574 case KVMPPC_GSID_LOGICAL_PVR:
575 vcpu->arch.vcore->arch_compat = kvmppc_gse_get_u32(gse);
/* Value now mirrors the hypervisor's copy: mark it cached. */
580 kvmppc_gsbm_set(valids, iden);
/* Message ops shared by the vcpu (thread-wide) and vcore (guest-wide)
 * nestedv2 state messages. */
586 static struct kvmppc_gs_msg_ops vcpu_message_ops = {
587 .get_size = gs_msg_ops_vcpu_get_size,
588 .fill_info = gs_msg_ops_vcpu_fill_info,
589 .refresh_info = gs_msg_ops_vcpu_refresh_info,
/*
 * Set up the per-vcpu nestedv2 I/O state: query the L0's minimum run
 * output size, allocate and register the run output and run input
 * buffers, and create the vcpu/vcore guest-state messages.  Uses
 * goto-based cleanup on failure (labels partially elided in this view).
 */
592 static int kvmhv_nestedv2_host_create(struct kvm_vcpu *vcpu,
593 struct kvmhv_nestedv2_io *io)
595 struct kvmhv_nestedv2_config *cfg;
596 struct kvmppc_gs_buff *gsb, *vcpu_run_output, *vcpu_run_input;
597 unsigned long guest_id, vcpu_id;
598 struct kvmppc_gs_msg *gsm, *vcpu_message, *vcore_message;
602 guest_id = vcpu->kvm->arch.lpid;
603 vcpu_id = vcpu->vcpu_id;
/* Config message + scratch buffer used for the setup hcalls below. */
605 gsm = kvmppc_gsm_new(&config_msg_ops, cfg, KVMPPC_GS_FLAGS_WIDE,
612 gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), guest_id, vcpu_id,
/* Ask the L0 for its minimum acceptable run-output buffer size. */
619 rc = kvmppc_gsb_receive_datum(gsb, gsm,
620 KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
622 pr_err("KVM-NESTEDv2: couldn't get vcpu run output buffer minimum size\n");
626 vcpu_run_output = kvmppc_gsb_new(cfg->vcpu_run_output_size, guest_id,
627 vcpu_id, GFP_KERNEL);
628 if (!vcpu_run_output) {
/* Register the run-output buffer's address/size with the L0. */
633 cfg->vcpu_run_output_cfg.address = kvmppc_gsb_paddress(vcpu_run_output);
634 cfg->vcpu_run_output_cfg.size = kvmppc_gsb_capacity(vcpu_run_output);
635 io->vcpu_run_output = vcpu_run_output;
638 rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_OUTPUT);
640 pr_err("KVM-NESTEDv2: couldn't set vcpu run output buffer\n");
/* Thread-wide vcpu message: starts with every ID included. */
644 vcpu_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu, 0, GFP_KERNEL);
649 kvmppc_gsm_include_all(vcpu_message);
651 io->vcpu_message = vcpu_message;
/* Run-input buffer sized for the full vcpu message, then registered. */
653 vcpu_run_input = kvmppc_gsb_new(kvmppc_gsm_size(vcpu_message), guest_id,
654 vcpu_id, GFP_KERNEL);
655 if (!vcpu_run_input) {
657 goto free_vcpu_message;
660 io->vcpu_run_input = vcpu_run_input;
661 cfg->vcpu_run_input_cfg.address = kvmppc_gsb_paddress(vcpu_run_input);
662 cfg->vcpu_run_input_cfg.size = kvmppc_gsb_capacity(vcpu_run_input);
663 rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_INPUT);
665 pr_err("KVM-NESTEDv2: couldn't set vcpu run input buffer\n");
666 goto free_vcpu_run_input;
/* Guest-wide (WIDE) message; LOGICAL_PVR is excluded from it. */
669 vcore_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu,
670 KVMPPC_GS_FLAGS_WIDE, GFP_KERNEL);
671 if (!vcore_message) {
673 goto free_vcpu_run_input;
676 kvmppc_gsm_include_all(vcore_message);
677 kvmppc_gsbm_clear(&vcore_message->bitmap, KVMPPC_GSID_LOGICAL_PVR);
678 io->vcore_message = vcore_message;
/* All state starts cached as valid; setup scratch is freed. */
680 kvmppc_gsbm_fill(&io->valids);
681 kvmppc_gsm_free(gsm);
682 kvmppc_gsb_free(gsb);
/* Error-path cleanup (labels on elided lines). */
686 kvmppc_gsb_free(vcpu_run_input);
688 kvmppc_gsm_free(vcpu_message);
690 kvmppc_gsb_free(vcpu_run_output);
692 kvmppc_gsb_free(gsb);
694 kvmppc_gsm_free(gsm);
700 * __kvmhv_nestedv2_mark_dirty() - mark a Guest State ID to be sent to the host
702 * @iden: guest state ID
704 * Mark a guest state ID as having been changed by the L1 host and thus
705 * the new value must be sent to the L0 hypervisor. See kvmhv_nestedv2_flush_vcpu()
707 int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
709 struct kvmhv_nestedv2_io *io;
710 struct kvmppc_gs_bitmap *valids;
711 struct kvmppc_gs_msg *gsm;
716 io = &vcpu->arch.nestedv2_io;
717 valids = &io->valids;
/* Queue the ID in both messages; whichever matches its WIDE scope will
 * actually carry it (fill_info skips scope mismatches). */
718 gsm = io->vcpu_message;
719 kvmppc_gsm_include(gsm, iden);
720 gsm = io->vcore_message;
721 kvmppc_gsm_include(gsm, iden);
/* The L1 copy is now authoritative — mark it valid in the cache. */
722 kvmppc_gsbm_set(valids, iden);
725 EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty);
728 * __kvmhv_nestedv2_cached_reload() - reload a Guest State ID from the host
730 * @iden: guest state ID
732 * Reload the value for the guest state ID from the L0 host into the L1 host.
733 * This is cached so that going out to the L0 host only happens if necessary.
735 int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
737 struct kvmhv_nestedv2_io *io;
738 struct kvmppc_gs_bitmap *valids;
739 struct kvmppc_gs_buff *gsb;
740 struct kvmppc_gs_msg gsm;
746 io = &vcpu->arch.nestedv2_io;
747 valids = &io->valids;
/* Already cached as valid: no hcall needed. */
748 if (kvmppc_gsbm_test(valids, iden))
/* Reuse the run-input buffer as scratch; message flags must match the
 * ID's thread/guest-wide scope. */
751 gsb = io->vcpu_run_input;
752 kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, kvmppc_gsid_flags(iden));
753 rc = kvmppc_gsb_receive_datum(gsb, &gsm, iden);
755 pr_err("KVM-NESTEDv2: couldn't get GSID: 0x%x\n", iden);
760 EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_cached_reload);
763 * kvmhv_nestedv2_flush_vcpu() - send modified Guest State IDs to the host
765 * @time_limit: hdec expiry tb
767 * Send the values marked by __kvmhv_nestedv2_mark_dirty() to the L0 host.
768 * Thread wide values are copied to the H_GUEST_RUN_VCPU input buffer. Guest
769 * wide values need to be sent with H_GUEST_SET first.
771 * The hdec tb offset is always sent to L0 host.
773 int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit)
775 struct kvmhv_nestedv2_io *io;
776 struct kvmppc_gs_buff *gsb;
777 struct kvmppc_gs_msg *gsm;
780 io = &vcpu->arch.nestedv2_io;
781 gsb = io->vcpu_run_input;
/* Guest-wide elements must go out first, via H_GUEST_SET semantics. */
782 gsm = io->vcore_message;
783 rc = kvmppc_gsb_send_data(gsb, gsm);
785 pr_err("KVM-NESTEDv2: couldn't set guest wide elements\n");
/* Then fill the run-input buffer with the dirty thread-wide state. */
789 gsm = io->vcpu_message;
790 kvmppc_gsb_reset(gsb);
791 rc = kvmppc_gsm_fill_info(gsm, gsb);
793 pr_err("KVM-NESTEDv2: couldn't fill vcpu run input buffer\n");
/* The hdec expiry timebase is always appended for the run hcall. */
797 rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_HDEC_EXPIRY_TB, time_limit);
802 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_flush_vcpu);
805 * kvmhv_nestedv2_set_ptbl_entry() - send partition and process table state to
808 * @dw0: partition table double word
809 * @dw1: process table double word
811 int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1)
813 struct kvmppc_gs_part_table patbl;
814 struct kvmppc_gs_proc_table prtbl;
815 struct kvmppc_gs_buff *gsb;
/* Buffer sized for exactly two elements plus the buffer header. */
819 size = kvmppc_gse_total_size(
820 kvmppc_gsid_size(KVMPPC_GSID_PARTITION_TABLE)) +
821 kvmppc_gse_total_size(
822 kvmppc_gsid_size(KVMPPC_GSID_PROCESS_TABLE)) +
823 sizeof(struct kvmppc_gs_header);
824 gsb = kvmppc_gsb_new(size, lpid, 0, GFP_KERNEL);
/* Decode PATE dw0: radix page-directory base, EA bits (RTS split across
 * two fields), and page-directory size. */
828 patbl.address = dw0 & RPDB_MASK;
829 patbl.ea_bits = ((((dw0 & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
830 ((dw0 & RTS2_MASK) >> RTS2_SHIFT)) +
832 patbl.gpd_size = 1ul << ((dw0 & RPDS_MASK) + 3);
833 rc = kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE, patbl);
/* Decode PATE dw1: process table base and size. */
837 prtbl.address = dw1 & PRTB_MASK;
838 prtbl.gpd_size = 1ul << ((dw1 & PRTS_MASK) + 12);
839 rc = kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE, prtbl);
/* Both tables are guest-wide state, so send with the WIDE flag. */
843 rc = kvmppc_gsb_send(gsb, KVMPPC_GS_FLAGS_WIDE);
845 pr_err("KVM-NESTEDv2: couldn't set the PATE\n");
849 kvmppc_gsb_free(gsb);
853 kvmppc_gsb_free(gsb);
856 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_ptbl_entry);
859 * kvmhv_nestedv2_parse_output() - receive values from H_GUEST_RUN_VCPU output
862 * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu.
864 int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu)
866 struct kvmhv_nestedv2_io *io;
867 struct kvmppc_gs_buff *gsb;
868 struct kvmppc_gs_msg gsm;
870 io = &vcpu->arch.nestedv2_io;
871 gsb = io->vcpu_run_output;
/* Clear fault state first so values from a previous exit can't leak
 * through if the output buffer omits them this time. */
873 vcpu->arch.fault_dar = 0;
874 vcpu->arch.fault_dsisr = 0;
875 vcpu->arch.fault_gpa = 0;
876 vcpu->arch.emul_inst = KVM_INST_FETCH_FAILED;
/* Scatter the run-output elements back into the vcpu. */
878 kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
879 return kvmppc_gsm_refresh_info(&gsm, gsb);
881 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_parse_output);
/*
 * Release all per-vcpu nestedv2 I/O resources allocated by
 * kvmhv_nestedv2_host_create().
 */
883 static void kvmhv_nestedv2_host_free(struct kvm_vcpu *vcpu,
884 struct kvmhv_nestedv2_io *io)
886 kvmppc_gsm_free(io->vcpu_message);
887 kvmppc_gsm_free(io->vcore_message);
888 kvmppc_gsb_free(io->vcpu_run_input);
889 kvmppc_gsb_free(io->vcpu_run_output);
/*
 * Fetch from the L0 every pt_regs-related guest-state ID (GPRs, CR,
 * XER, CTR, LR, NIA) that is not already cached as valid, in a single
 * receive operation.
 */
892 int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs)
894 struct kvmhv_nestedv2_io *io;
895 struct kvmppc_gs_bitmap *valids;
896 struct kvmppc_gs_buff *gsb;
897 struct kvmppc_gs_msg gsm;
901 io = &vcpu->arch.nestedv2_io;
902 valids = &io->valids;
904 gsb = io->vcpu_run_input;
905 kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
/* Only request IDs whose cached value is stale (not in valids). */
907 for (int i = 0; i < 32; i++) {
908 if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_GPR(i)))
909 kvmppc_gsm_include(&gsm, KVMPPC_GSID_GPR(i));
912 if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CR))
913 kvmppc_gsm_include(&gsm, KVMPPC_GSID_CR);
915 if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_XER))
916 kvmppc_gsm_include(&gsm, KVMPPC_GSID_XER);
918 if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CTR))
919 kvmppc_gsm_include(&gsm, KVMPPC_GSID_CTR);
921 if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_LR))
922 kvmppc_gsm_include(&gsm, KVMPPC_GSID_LR);
924 if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_NIA))
925 kvmppc_gsm_include(&gsm, KVMPPC_GSID_NIA);
/* One hcall round-trip for everything that was included above. */
927 rc = kvmppc_gsb_receive_data(gsb, &gsm);
929 pr_err("KVM-NESTEDv2: couldn't reload ptregs\n");
933 EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_reload_ptregs);
/*
 * Mark every pt_regs-related guest-state ID (all 32 GPRs, CR, XER, CTR,
 * LR, NIA) as dirty so the next flush sends them to the L0.
 */
935 int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
936 struct pt_regs *regs)
938 for (int i = 0; i < 32; i++)
939 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(i));
941 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
942 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
943 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
944 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
945 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
949 EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty_ptregs);
952 * kvmhv_nestedv2_vcpu_create() - create nested vcpu for the NESTEDv2 API
954 * @io: NESTEDv2 nested io state
956 * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu.
958 int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu,
959 struct kvmhv_nestedv2_io *io)
/* Ask the L0 hypervisor to create the vcpu for this guest/vcpu id. */
963 rc = plpar_guest_create_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id);
/* Map the hcall status to an errno (switch body partially elided). */
965 if (rc != H_SUCCESS) {
966 pr_err("KVM: Create Guest vcpu hcall failed, rc=%ld\n", rc);
968 case H_NOT_ENOUGH_RESOURCES:
/* Then set up the L1-side buffers/messages for this vcpu. */
978 rc = kvmhv_nestedv2_host_create(vcpu, io);
982 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_create);
985 * kvmhv_nestedv2_vcpu_free() - free the NESTEDv2 state
987 * @io: NESTEDv2 nested io state
989 void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu,
990 struct kvmhv_nestedv2_io *io)
/* All per-vcpu nestedv2 resources live in io; free them in one place. */
992 kvmhv_nestedv2_host_free(vcpu, io);
994 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_free);