// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>
#include <linux/mmu_notifier.h>

#include <asm/access-regs.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/cpacf.h>
#include <asm/timex.h>

#define CREATE_TRACE_POINTS
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */

#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio),
	STATS_DESC_COUNTER(VM, aen_forward),
	STATS_DESC_COUNTER(VM, gmap_shadow_reuse),
	STATS_DESC_COUNTER(VM, gmap_shadow_create),
	STATS_DESC_COUNTER(VM, gmap_shadow_r1_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_r2_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_r3_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_sg_entry),
	STATS_DESC_COUNTER(VM, gmap_shadow_pg_entry),
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

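/*
 * Layout note (a sketch of the generic binary stats ABI, not s390 specific):
 * the fd returned by KVM_GET_STATS_FD exposes a flat file consisting of the
 * kvm_stats_header, then the id string, then the descriptor array, then the
 * data values; the id_offset, desc_offset and data_offset fields above
 * encode exactly that layout.
 */
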
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_lpswey),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling. >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");

/*
 * allow asynchronous deinit for protected guests; enable by default since
 * the feature is opt-in anyway
 */
static int async_destroy = 1;
module_param(async_destroy, int, 0444);
MODULE_PARM_DESC(async_destroy, "Asynchronous destroy for protected guests");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines the default mask for facilities. Consists
 * of the defines in FACILITIES_KVM and the non-hypervisor-managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta; we have to compensate for this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

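/*
 * Worked example for the compensation above: if the host TOD jumps forward
 * by delta = 16, the guest epoch must shrink by 16 so that the guest-visible
 * clock (TOD + epoch) is unchanged. After negation the 64-bit addition to
 * scb->epoch may wrap; the "scb->epoch < delta" test detects that carry and
 * propagates it into the epoch index (epdx), while delta_idx sign-extends
 * negative deltas into the upper part of the extended epoch.
 */
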
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc)
		: [function] "d" (function)
		: CC_CLOBBER_LIST("0"));
	return CC_TRANSFORM(cc) == 0;
}

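/*
 * PERFORM LOCKED OPERATION with bit 8 of the function code set (nr | 0x100)
 * acts as a "test bit" probe: it performs no operation but sets CC 0 when
 * the function is installed. kvm_s390_cpu_feat_init() below runs this probe
 * for all 256 function codes and records each result MSB-first, i.e.
 * function i lands in byte i >> 3 under mask 0x80 >> (i & 7), matching the
 * architected query-result bit numbering.
 */
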
static __always_inline void pfcr_query(u8 (*query)[16])
{
	asm volatile(
		"	lghi	0,0\n"
		"	.insn	rsy,0xeb0000000016,0,0,%[query]\n"
		: [query] "=QS" (*query)
		:
		: "cc", "0");
}

static __always_inline void __sortl_query(u8 (*query)[32])
{
	asm volatile(
		"	lghi	0,0\n"
		"	la	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rre,0xb9380000,2,4\n"
		: [query] "=R" (*query)
		:
		: "cc", "0", "1");
}

static __always_inline void __dfltcc_query(u8 (*query)[32])
{
	asm volatile(
		"	lghi	0,0\n"
		"	la	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,0xb9390000,2,4,6,0\n"
		: [query] "=R" (*query)
		:
		: "cc", "0", "1");
}

static void __init kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__sortl_query(&kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__dfltcc_query(&kvm_s390_available_subfunc.dfltcc);

	if (test_facility(201)) /* PFCR */
		pfcr_query(&kvm_s390_available_subfunc.pfcr);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will cause
	 * pages to be detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

static int __init __kvm_s390_init(void)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto err_kvm_uv;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto err_debug_view;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto err_flic;
	}

	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
		rc = kvm_s390_pci_init();
		if (rc) {
			pr_err("Unable to allocate AIFT for PCI\n");
			goto err_pci;
		}
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto err_gib;

	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);

	return 0;

err_gib:
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
err_pci:
err_flic:
err_debug_view:
	debug_unregister(kvm_s390_dbf_uv);
err_kvm_uv:
	debug_unregister(kvm_s390_dbf);
	return rc;
}

static void __kvm_s390_exit(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);

	kvm_s390_gib_destroy();
	if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
		kvm_s390_pci_exit();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
	case KVM_CAP_IRQFD_RESAMPLE:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !(kvm && kvm_is_ucontrol(kvm)))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_S390_MEM_OP_EXTENSION:
		/*
		 * Flag bits indicating which extensions are supported.
		 * If r > 0, the base extension must also be supported/indicated,
		 * in order to maintain backwards compatibility.
		 */
		r = KVM_S390_MEMOP_EXTENSION_CAP_BASE |
		    KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		if (ext == KVM_CAP_NR_VCPUS)
			r = min_t(unsigned int, num_online_cpus(), r);
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = test_facility(129);
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED_ASYNC_DISABLE:
		r = async_destroy && is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	case KVM_CAP_S390_PROTECTED_DUMP: {
		u64 pv_cmds_dump[] = {
			BIT_UVC_CMD_DUMP_INIT,
			BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE,
			BIT_UVC_CMD_DUMP_CPU,
			BIT_UVC_CMD_DUMP_COMPLETE,
		};
		int i;

		r = is_prot_virt_host();

		for (i = 0; i < ARRAY_SIZE(pv_cmds_dump); i++) {
			if (!test_bit_inv(pv_cmds_dump[i],
					  (unsigned long *)&uv_info.inst_calls_list)) {
				r = 0;
				break;
			}
		}
		break;
	}
	case KVM_CAP_S390_ZPCI_OP:
		r = kvm_s390_pci_interp_allowed();
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = test_facility(11);
		break;
	default:
		r = 0;
	}
	return r;
}

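/*
 * Note on the vCPU limits reported above: with SCA entries in use, the basic
 * SCA caps a VM at KVM_S390_BSCA_CPU_SLOTS vCPUs (64 entries), while the
 * extended SCA, available with ESCA and 64-bit SCAO, raises that to
 * KVM_S390_ESCA_CPU_SLOTS (248 entries); without SCA entries the generic
 * KVM_MAX_VCPUS limit applies.
 */
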
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

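/*
 * Each loop iteration above covers one guest segment, i.e. one page table of
 * _PAGE_ENTRIES (256) 4K pages = 1M of guest memory, which is also the
 * granularity at which gmap_sync_dirty_log_pmd() scans the gmap.
 */
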
/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (cpu_has_vx()) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			if (test_facility(192)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 192);
				set_kvm_facility(kvm->arch.model.fac_list, 192);
			}
			if (test_facility(198)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 198);
				set_kvm_facility(kvm->arch.model.fac_list, 198);
			}
			if (test_facility(199)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 199);
				set_kvm_facility(kvm->arch.model.fac_list, 199);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	case KVM_CAP_S390_CPU_TOPOLOGY:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(11)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 11);
			set_kvm_facility(kvm->arch.model.fac_list, 11);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s",
			 r ? "(not available)" : "(success)");
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int idx, ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

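/*
 * Blocking all vCPUs around the crycb update guarantees that no vCPU enters
 * SIE with a half-updated wrapping-key mask, and KVM_REQ_VSIE_RESTART kicks
 * nested guests out of vsie so that their shadow crycb is rebuilt on
 * reentry.
 */
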
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_vcpu_pci_setup(struct kvm_vcpu *vcpu)
{
	/* Only set the ECB bits after guest requests zPCI interpretation */
	if (!vcpu->kvm->arch.use_zpci_interp)
		return;

	vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI;
	vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI;
}

void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	lockdep_assert_held(&kvm->lock);

	if (!kvm_s390_pci_interp_allowed())
		return;

	/*
	 * If host is configured for PCI and the necessary facilities are
	 * available, turn on interpretation for the life of this guest
	 */
	kvm->arch.use_zpci_interp = 1;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_pci_setup(vcpu);
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	unsigned long cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int bkt;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || kvm_memslots_empty(slots))
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	kvm_for_each_memslot(ms, bkt, slots) {
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

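/*
 * cmma_dirty_pages seeds the count of guest pages with dirty CMMA state; it
 * is consumed during migration by KVM_S390_GET_CMMA_BITS, where the
 * remaining count lets userspace estimate how much CMMA state is still left
 * to transfer.
 */
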
/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	__kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	__kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	mutex_lock(&kvm->lock);
	/*
	 * For protected guests, the TOD is managed by the ultravisor, so trying
	 * to change it will never bring the expected results.
	 */
	if (kvm_s390_pv_is_protected(kvm)) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -EINVAL;
		break;
	}

out_unlock:
	mutex_unlock(&kvm->lock);
	return ret;
}

static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	union tod_clock clk;

	preempt_disable();

	store_tod_clock_ext(&clk);

	gtod->tod = clk.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
		if (gtod->tod < clk.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}

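/*
 * Example for the carry handling above: the guest TOD is the 64-bit sum
 * clk.tod + epoch. If clk.tod = 0xfffffffffffffff0 and epoch = 0x20, the
 * sum wraps to 0x10 < clk.tod, so one carry must be added to the epoch
 * index that facility 139 (multiple-epoch) makes visible to the guest.
 */
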
static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

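/*
 * The IBC handling above clamps the requested instruction-blocking control
 * into the machine's supported range: values above the unblocked IBC are
 * capped to it, values below the lowest supported IBC are raised to it, and
 * an IBC of 0 (no IBC) is left untouched.
 */
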
static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1567 VM_EVENT(kvm, 3, "GET: guest PFCR subfunc 0x%16.16lx.%16.16lx",
1568 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0],
1569 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]);
#define KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK	\
(						\
	((struct kvm_s390_vm_cpu_uv_feat){	\
		.ap = 1,			\
		.ap_intr = 1,			\
	})					\
	.feat					\
)

static int kvm_s390_set_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr;
	unsigned long data, filter;

	filter = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
	if (get_user(data, &ptr->feat))
		return -EFAULT;
	if (!bitmap_subset(&data, &filter, KVM_S390_VM_CPU_UV_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	kvm->arch.model.uv_feat_guest.feat = data;
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
		ret = kvm_s390_set_uv_feat(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
	       sizeof(stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_to_arr64(data.feat, kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
1784 VM_EVENT(kvm, 3, "GET: guest PFCR subfunc 0x%16.16lx.%16.16lx",
1785 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0],
1786 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]);
static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
	VM_EVENT(kvm, 3, "GET: host PFCR   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]);

	return 0;
}

static int kvm_s390_get_processor_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
	unsigned long feat = kvm->arch.model.uv_feat_guest.feat;

	if (put_user(feat, &dst->feat))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);

	return 0;
}

static int kvm_s390_get_machine_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
	unsigned long feat;

	BUILD_BUG_ON(sizeof(*dst) != sizeof(uv_info.uv_feature_indications));

	feat = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
	if (put_user(feat, &dst->feat))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);

	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
		ret = kvm_s390_get_processor_uv_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
		ret = kvm_s390_get_machine_uv_feat(kvm, attr);
		break;
	}
	return ret;
}

/**
 * kvm_s390_update_topology_change_report - update CPU topology change report
 * @kvm: guest KVM description
 * @val: set or clear the MTCR bit
 *
 * Updates the Multiprocessor Topology-Change-Report bit to signal
 * a topology change to the guest.
 * This is only relevant if the topology facility is present.
 *
 * The SCA version, bsca or esca, doesn't matter as the offset is the same.
 */
static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val)
{
	union sca_utility new, old;
	struct bsca_block *sca;

	read_lock(&kvm->arch.sca_lock);
	sca = kvm->arch.sca;
	old = READ_ONCE(sca->utility);
	do {
		new = old;
		new.mtcr = val;
	} while (!try_cmpxchg(&sca->utility.val, &old.val, new.val));
	read_unlock(&kvm->arch.sca_lock);
}

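/*
 * The cmpxchg loop above is needed because the SCA utility field may be
 * updated concurrently by other writers, so the MTCR bit is merged in
 * atomically instead of being stored with a plain write that could clobber
 * other utility bits.
 */
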
static int kvm_s390_set_topo_change_indication(struct kvm *kvm,
					       struct kvm_device_attr *attr)
{
	if (!test_kvm_facility(kvm, 11))
		return -ENXIO;

	kvm_s390_update_topology_change_report(kvm, !!attr->attr);
	return 0;
}

static int kvm_s390_get_topo_change_indication(struct kvm *kvm,
					       struct kvm_device_attr *attr)
{
	u8 topo;

	if (!test_kvm_facility(kvm, 11))
		return -ENXIO;

	read_lock(&kvm->arch.sca_lock);
	topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr;
	read_unlock(&kvm->arch.sca_lock);

	return put_user(topo, (u8 __user *)attr->addr);
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	case KVM_S390_VM_CPU_TOPOLOGY:
		ret = kvm_s390_set_topo_change_indication(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

2004 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
2008 switch (attr->group) {
2009 case KVM_S390_VM_MEM_CTRL:
2010 ret = kvm_s390_get_mem_control(kvm, attr);
2012 case KVM_S390_VM_TOD:
2013 ret = kvm_s390_get_tod(kvm, attr);
2015 case KVM_S390_VM_CPU_MODEL:
2016 ret = kvm_s390_get_cpu_model(kvm, attr);
2018 case KVM_S390_VM_MIGRATION:
2019 ret = kvm_s390_vm_get_migration(kvm, attr);
2021 case KVM_S390_VM_CPU_TOPOLOGY:
2022 ret = kvm_s390_get_topo_change_indication(kvm, attr);
2032 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
2036 switch (attr->group) {
2037 case KVM_S390_VM_MEM_CTRL:
2038 switch (attr->attr) {
2039 case KVM_S390_VM_MEM_ENABLE_CMMA:
2040 case KVM_S390_VM_MEM_CLR_CMMA:
2041 ret = sclp.has_cmma ? 0 : -ENXIO;
2043 case KVM_S390_VM_MEM_LIMIT_SIZE:
2051 case KVM_S390_VM_TOD:
2052 switch (attr->attr) {
2053 case KVM_S390_VM_TOD_LOW:
2054 case KVM_S390_VM_TOD_HIGH:
2062 case KVM_S390_VM_CPU_MODEL:
2063 switch (attr->attr) {
2064 case KVM_S390_VM_CPU_PROCESSOR:
2065 case KVM_S390_VM_CPU_MACHINE:
2066 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
2067 case KVM_S390_VM_CPU_MACHINE_FEAT:
2068 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
2069 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
2070 case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
2071 case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
2079 case KVM_S390_VM_CRYPTO:
2080 switch (attr->attr) {
2081 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
2082 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
2083 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
2084 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
2087 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
2088 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
2089 ret = ap_instructions_available() ? 0 : -ENXIO;
2096 case KVM_S390_VM_MIGRATION:
2099 case KVM_S390_VM_CPU_TOPOLOGY:
2100 ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO;
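/*
 * Read the storage keys of a range of guest frames into a user buffer.
 * KVM_S390_GET_SKEYS_NONE tells userspace that the guest never enabled
 * storage keys, so there is nothing to save (e.g. for migration).
 */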
2110 static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
2114 int srcu_idx, i, r = 0;
2116 if (args->flags != 0)
2117 return -EINVAL;
2119 /* Is this guest using storage keys? */
2120 if (!mm_uses_skeys(current->mm))
2121 return KVM_S390_GET_SKEYS_NONE;
2123 /* Enforce sane limit on memory allocation */
2124 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
2125 return -EINVAL;
2127 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
2128 if (!keys)
2129 return -ENOMEM;
2131 mmap_read_lock(current->mm);
2132 srcu_idx = srcu_read_lock(&kvm->srcu);
2133 for (i = 0; i < args->count; i++) {
2134 hva = gfn_to_hva(kvm, args->start_gfn + i);
2135 if (kvm_is_error_hva(hva)) {
2136 r = -EFAULT;
2137 break;
2138 }
2140 r = get_guest_storage_key(current->mm, hva, &keys[i]);
2141 if (r)
2142 break;
2143 }
2144 srcu_read_unlock(&kvm->srcu, srcu_idx);
2145 mmap_read_unlock(current->mm);
2148 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
2149 sizeof(uint8_t) * args->count);
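/*
 * Write storage keys for a range of guest frames from a user buffer.
 * Key handling is enabled lazily via s390_enable_skey() on first use;
 * key values with the reserved lowest bit set are rejected.
 */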
2158 static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
2162 int srcu_idx, i, r = 0;
2165 if (args->flags != 0)
2166 return -EINVAL;
2168 /* Enforce sane limit on memory allocation */
2169 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
2170 return -EINVAL;
2172 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
2173 if (!keys)
2174 return -ENOMEM;
2176 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
2177 sizeof(uint8_t) * args->count);
2183 /* Enable storage key handling for the guest */
2184 r = s390_enable_skey();
2189 mmap_read_lock(current->mm);
2190 srcu_idx = srcu_read_lock(&kvm->srcu);
2191 while (i < args->count) {
2193 hva = gfn_to_hva(kvm, args->start_gfn + i);
2194 if (kvm_is_error_hva(hva)) {
2199 /* Lowest order bit is reserved */
2200 if (keys[i] & 0x01) {
2205 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
2206 if (r) {
2207 r = fixup_user_fault(current->mm, hva,
2208 FAULT_FLAG_WRITE, &unlocked);
2209 if (r)
2210 break;
2211 }
2212 if (!r)
2213 i++;
2214 }
2215 srcu_read_unlock(&kvm->srcu, srcu_idx);
2216 mmap_read_unlock(current->mm);
2223 * Base address and length must be sent at the start of each block, therefore
2224 * it's cheaper to send some clean data, as long as it's less than the size of
2225 * two longs.
2226 */
2227 #define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
2228 /* for consistency */
2229 #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
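/*
 * With 8-byte longs the distance is 16, i.e. a run of up to 16 clean
 * values is emitted inline before it becomes cheaper to terminate the
 * block and start a new one with a fresh base address.
 */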
2231 static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2232 u8 *res, unsigned long bufsize)
2234 unsigned long pgstev, hva, cur_gfn = args->start_gfn;
2237 while (args->count < bufsize) {
2238 hva = gfn_to_hva(kvm, cur_gfn);
2240 * We return an error if the first value was invalid, but we
2241 * return successfully if at least one value was copied.
2243 if (kvm_is_error_hva(hva))
2244 return args->count ? 0 : -EFAULT;
2245 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2246 pgstev = 0;
2247 res[args->count++] = (pgstev >> 24) & 0x43;
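/*
 * Unlike a plain gfn_to_memslot(), the "approx" lookup also yields a
 * nearby slot when the gfn itself falls into a hole between memslots,
 * which lets the dirty-CMMA walk below step over unbacked ranges.
 */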
2254 static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
2257 return ____gfn_to_memslot(slots, gfn, true);
2260 static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
2261 unsigned long cur_gfn)
2263 struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
2264 unsigned long ofs = cur_gfn - ms->base_gfn;
2265 struct rb_node *mnode = &ms->gfn_node[slots->node_idx];
2267 if (ms->base_gfn + ms->npages <= cur_gfn) {
2268 mnode = rb_next(mnode);
2269 /* If we are above the highest slot, wrap around */
2270 if (!mnode)
2271 mnode = rb_first(&slots->gfn_tree);
2273 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2274 ofs = 0;
2275 }
2277 if (cur_gfn < ms->base_gfn)
2278 ofs = 0;
2280 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
2281 while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
2282 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
2283 ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
2285 return ms->base_gfn + ofs;
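/*
 * Collect the CMMA values of a run of dirty pages: each per-page dirty
 * bit is cleared as its pgste is read, and the walk stops once the next
 * dirty page is too far away, the buffer is full, or the end of guest
 * memory is reached.
 */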
2288 static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2289 u8 *res, unsigned long bufsize)
2291 unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
2292 struct kvm_memslots *slots = kvm_memslots(kvm);
2293 struct kvm_memory_slot *ms;
2295 if (unlikely(kvm_memslots_empty(slots)))
2296 return 0;
2298 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2299 ms = gfn_to_memslot(kvm, cur_gfn);
2301 args->start_gfn = cur_gfn;
2304 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2305 mem_end = kvm_s390_get_gfn_end(slots);
2307 while (args->count < bufsize) {
2308 hva = gfn_to_hva(kvm, cur_gfn);
2309 if (kvm_is_error_hva(hva))
2310 return 0;
2311 /* Decrement only if we actually flipped the bit to 0 */
2312 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2313 atomic64_dec(&kvm->arch.cmma_dirty_pages);
2314 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2315 pgstev = 0;
2316 /* Save the value */
2317 res[args->count++] = (pgstev >> 24) & 0x43;
2318 /* If the next bit is too far away, stop. */
2319 if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2320 return 0;
2321 /* If we reached the previous "next", find the next one */
2322 if (cur_gfn == next_gfn)
2323 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2324 /* Reached the end of memory or of the buffer, stop */
2325 if ((next_gfn >= mem_end) ||
2326 (next_gfn - args->start_gfn >= bufsize))
2327 return 0;
2329 /* Reached the end of the current memslot, take the next one. */
2330 if (cur_gfn - ms->base_gfn >= ms->npages) {
2331 ms = gfn_to_memslot(kvm, cur_gfn);
2340 * This function searches for the next page with dirty CMMA attributes, and
2341 * saves the attributes in the buffer up to either the end of the buffer or
2342 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2343 * no trailing clean bytes are saved.
2344 * In case no dirty bits were found, or if CMMA was not enabled or used, the
2345 * output buffer will indicate 0 as length.
2347 static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2348 struct kvm_s390_cmma_log *args)
2350 unsigned long bufsize;
2351 int srcu_idx, peek, ret;
2354 if (!kvm->arch.use_cmma)
2355 return -ENXIO;
2356 /* Invalid/unsupported flags were specified */
2357 if (args->flags & ~KVM_S390_CMMA_PEEK)
2358 return -EINVAL;
2359 /* Migration mode query, and we are not doing a migration */
2360 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
2361 if (!peek && !kvm->arch.migration_mode)
2362 return -EINVAL;
2363 /* CMMA is disabled or was not used, or the buffer has length zero */
2364 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
2365 if (!bufsize || !kvm->mm->context.uses_cmm) {
2366 memset(args, 0, sizeof(*args));
2367 return 0;
2368 }
2369 /* We are not peeking, and there are no dirty pages */
2370 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2371 memset(args, 0, sizeof(*args));
2372 return 0;
2373 }
2375 values = vmalloc(bufsize);
2376 if (!values)
2377 return -ENOMEM;
2379 mmap_read_lock(kvm->mm);
2380 srcu_idx = srcu_read_lock(&kvm->srcu);
2381 if (peek)
2382 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2383 else
2384 ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
2385 srcu_read_unlock(&kvm->srcu, srcu_idx);
2386 mmap_read_unlock(kvm->mm);
2388 if (kvm->arch.migration_mode)
2389 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2390 else
2391 args->remaining = 0;
2393 if (copy_to_user((void __user *)args->values, values, args->count))
2394 ret = -EFAULT;
2396 vfree(values);
2397 return ret;
2398 }
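/*
 * A minimal userspace sketch of driving the ioctl above during
 * migration; "vm_fd", "buf" and "buf_size" are hypothetical and error
 * handling is omitted:
 *
 *	struct kvm_s390_cmma_log log = {
 *		.values = (__u64)(unsigned long)buf,
 *	};
 *	do {
 *		log.count = buf_size;
 *		ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 *		log.start_gfn += log.count;
 *	} while (log.remaining);
 */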
2401 * This function sets the CMMA attributes for the given pages. If the input
2402 * buffer has zero length, no action is taken, otherwise the attributes are
2403 * set and the mm->context.uses_cmm flag is set.
2405 static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2406 const struct kvm_s390_cmma_log *args)
2408 unsigned long hva, mask, pgstev, i;
2410 int srcu_idx, r = 0;
2414 if (!kvm->arch.use_cmma)
2415 return -ENXIO;
2416 /* invalid/unsupported flags */
2417 if (args->flags != 0)
2418 return -EINVAL;
2419 /* Enforce sane limit on memory allocation */
2420 if (args->count > KVM_S390_CMMA_SIZE_MAX)
2421 return -EINVAL;
2423 if (args->count == 0)
2424 return 0;
2426 bits = vmalloc(array_size(sizeof(*bits), args->count));
2427 if (!bits)
2428 return -ENOMEM;
2430 r = copy_from_user(bits, (void __user *)args->values, args->count);
2431 if (r) {
2432 r = -EFAULT;
2433 goto out;
2434 }
2436 mmap_read_lock(kvm->mm);
2437 srcu_idx = srcu_read_lock(&kvm->srcu);
2438 for (i = 0; i < args->count; i++) {
2439 hva = gfn_to_hva(kvm, args->start_gfn + i);
2440 if (kvm_is_error_hva(hva)) {
2441 r = -EFAULT;
2442 break;
2443 }
2445 pgstev = bits[i];
2446 pgstev = pgstev << 24;
2447 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
2448 set_pgste_bits(kvm->mm, hva, mask, pgstev);
2450 srcu_read_unlock(&kvm->srcu, srcu_idx);
2451 mmap_read_unlock(kvm->mm);
2453 if (!kvm->mm->context.uses_cmm) {
2454 mmap_write_lock(kvm->mm);
2455 kvm->mm->context.uses_cmm = 1;
2456 mmap_write_unlock(kvm->mm);
2464 * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
2465 * non-protected.
2466 * @kvm: the VM whose protected vCPUs are to be converted
2467 * @rc: return value for the RC field of the UVC (in case of error)
2468 * @rrc: return value for the RRC field of the UVC (in case of error)
2470 * Does not stop in case of error, tries to convert as many
2471 * CPUs as possible. In case of error, the RC and RRC of the first error are
2472 * returned.
2474 * Return: 0 in case of success, otherwise -EIO
2476 int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2478 struct kvm_vcpu *vcpu;
2484 * We ignore failures and try to destroy as many CPUs as possible.
2485 * At the same time we must not free the assigned resources when
2486 * this fails, as the ultravisor still has access to that memory.
2487 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
2488 * behind.
2489 * We want to return the first failure rc and rrc, though.
2491 kvm_for_each_vcpu(i, vcpu, kvm) {
2492 mutex_lock(&vcpu->mutex);
2493 if (kvm_s390_pv_destroy_cpu(vcpu, &_rc, &_rrc) && !ret) {
2498 mutex_unlock(&vcpu->mutex);
2500 /* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
2502 kvm_s390_gisa_enable(kvm);
2507 * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
2508 * to protected.
2509 * @kvm: the VM whose protected vCPUs are to be converted
2510 * @rc: return value for the RC field of the UVC (in case of error)
2511 * @rrc: return value for the RRC field of the UVC (in case of error)
2513 * Tries to undo the conversion in case of error.
2515 * Return: 0 in case of success, otherwise -EIO
2517 static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2523 struct kvm_vcpu *vcpu;
2525 /* Disable the GISA if the ultravisor does not support AIV. */
2526 if (!uv_has_feature(BIT_UV_FEAT_AIV))
2527 kvm_s390_gisa_disable(kvm);
2529 kvm_for_each_vcpu(i, vcpu, kvm) {
2530 mutex_lock(&vcpu->mutex);
2531 r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
2532 mutex_unlock(&vcpu->mutex);
2537 kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
2542 * Here we provide user space with a direct interface to query UV
2543 * related data like UV maxima and available features as well as
2544 * feature specific data.
2546 * To facilitate future extension of the data structures we'll try to
2547 * write data up to the maximum requested length.
2549 static ssize_t kvm_s390_handle_pv_info(struct kvm_s390_pv_info *info)
2553 switch (info->header.id) {
2554 case KVM_PV_INFO_VM: {
2555 len_min = sizeof(info->header) + sizeof(info->vm);
2557 if (info->header.len_max < len_min)
2558 return -EINVAL;
2560 memcpy(info->vm.inst_calls_list,
2561 uv_info.inst_calls_list,
2562 sizeof(uv_info.inst_calls_list));
2564 /* It's max cpuid not max cpus, so it's off by one */
2565 info->vm.max_cpus = uv_info.max_guest_cpu_id + 1;
2566 info->vm.max_guests = uv_info.max_num_sec_conf;
2567 info->vm.max_guest_addr = uv_info.max_sec_stor_addr;
2568 info->vm.feature_indication = uv_info.uv_feature_indications;
2572 case KVM_PV_INFO_DUMP: {
2573 len_min = sizeof(info->header) + sizeof(info->dump);
2575 if (info->header.len_max < len_min)
2576 return -EINVAL;
2578 info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len;
2579 info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len;
2580 info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len;
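/*
 * Dispatch the KVM_PV_DUMP subcommands: INIT puts the VM into dump mode
 * (all vcpus are blocked while the UVC runs), CONFIG_STOR_STATE streams
 * configuration storage state into the user buffer, and COMPLETE
 * retrieves the final dump data.
 */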
2588 static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd,
2589 struct kvm_s390_pv_dmp dmp)
2592 void __user *result_buff = (void __user *)dmp.buff_addr;
2594 switch (dmp.subcmd) {
2595 case KVM_PV_DUMP_INIT: {
2596 if (kvm->arch.pv.dumping)
2597 break;
2599 /*
2600 * Block SIE entry as concurrent dump UVCs could lead
2601 * to validities.
2602 */
2603 kvm_s390_vcpu_block_all(kvm);
2605 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2606 UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc);
2607 KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x",
2610 kvm->arch.pv.dumping = true;
2612 kvm_s390_vcpu_unblock_all(kvm);
2617 case KVM_PV_DUMP_CONFIG_STOR_STATE: {
2618 if (!kvm->arch.pv.dumping)
2619 break;
2621 /*
2622 * gaddr is an output parameter since we might stop
2623 * early. As dmp will be copied back in our caller, we
2624 * don't need to do it ourselves.
2626 r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len,
2627 &cmd->rc, &cmd->rrc);
2630 case KVM_PV_DUMP_COMPLETE: {
2631 if (!kvm->arch.pv.dumping)
2632 break;
2634 r = -EINVAL;
2635 if (dmp.buff_len < uv_info.conf_dump_finalize_len)
2636 break;
2638 r = kvm_s390_pv_dump_complete(kvm, result_buff,
2639 &cmd->rc, &cmd->rrc);
2650 static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
2652 const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM);
2653 void __user *argp = (void __user *)cmd->data;
2658 mutex_lock(&kvm->lock);
2661 case KVM_PV_ENABLE: {
2663 if (kvm_s390_pv_is_protected(kvm))
2667 * FMT 4 SIE needs esca. As we never switch back to bsca from
2668 * esca, we need no cleanup in the error cases below
2670 r = sca_switch_to_extended(kvm);
2674 r = s390_disable_cow_sharing();
2678 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
2682 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
2684 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
2686 /* we need to block service interrupts from now on */
2687 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2690 case KVM_PV_ASYNC_CLEANUP_PREPARE:
2692 if (!kvm_s390_pv_is_protected(kvm) || !async_destroy)
2695 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2697 * If a CPU could not be destroyed, destroy VM will also fail.
2698 * There is no point in trying to destroy it. Instead return
2699 * the rc and rrc from the first CPU that failed destroying.
2703 r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc);
2705 /* no need to block service interrupts any more */
2706 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2708 case KVM_PV_ASYNC_CLEANUP_PERFORM:
2712 /* kvm->lock must not be held; this is asserted inside the function. */
2713 r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc);
2715 case KVM_PV_DISABLE: {
2717 if (!kvm_s390_pv_is_protected(kvm))
2720 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2722 * If a CPU could not be destroyed, destroy VM will also fail.
2723 * There is no point in trying to destroy it. Instead return
2724 * the rc and rrc from the first CPU that failed destroying.
2728 r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc);
2730 /* no need to block service interrupts any more */
2731 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
2734 case KVM_PV_SET_SEC_PARMS: {
2735 struct kvm_s390_pv_sec_parm parms = {};
2739 if (!kvm_s390_pv_is_protected(kvm))
2743 if (copy_from_user(&parms, argp, sizeof(parms)))
2746 /* Currently restricted to 8KB */
2748 if (parms.length > PAGE_SIZE * 2)
2752 hdr = vmalloc(parms.length);
2757 if (!copy_from_user(hdr, (void __user *)parms.origin,
2759 r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
2760 &cmd->rc, &cmd->rrc);
2765 case KVM_PV_UNPACK: {
2766 struct kvm_s390_pv_unp unp = {};
2769 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
2773 if (copy_from_user(&unp, argp, sizeof(unp)))
2776 r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
2777 &cmd->rc, &cmd->rrc);
2780 case KVM_PV_VERIFY: {
2782 if (!kvm_s390_pv_is_protected(kvm))
2785 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2786 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
2787 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
2791 case KVM_PV_PREP_RESET: {
2793 if (!kvm_s390_pv_is_protected(kvm))
2796 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2797 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2798 KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2802 case KVM_PV_UNSHARE_ALL: {
2804 if (!kvm_s390_pv_is_protected(kvm))
2807 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2808 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2809 KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2814 struct kvm_s390_pv_info info = {};
2818 * No need to check the VM protection here.
2820 * Maybe user space wants to query some of the data
2821 * when the VM is still unprotected. If we see the
2822 * need to fence a new data command we can still
2823 * return an error in the info handler.
2827 if (copy_from_user(&info, argp, sizeof(info.header)))
2831 if (info.header.len_max < sizeof(info.header))
2834 data_len = kvm_s390_handle_pv_info(&info);
2840 * If a data command struct is extended (multiple
2841 * times) this can be used to determine how much of it
2842 * the kernel actually filled in.
2843 */
2844 info.header.len_written = data_len;
2847 if (copy_to_user(argp, &info, data_len))
2854 struct kvm_s390_pv_dmp dmp;
2857 if (!kvm_s390_pv_is_protected(kvm))
2861 if (copy_from_user(&dmp, argp, sizeof(dmp)))
2864 r = kvm_s390_pv_dmp(kvm, cmd, dmp);
2868 if (copy_to_user(argp, &dmp, sizeof(dmp))) {
2879 mutex_unlock(&kvm->lock);
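/*
 * Checks common to all VM memops: no flags beyond the supported set, a
 * non-zero size capped at MEM_OP_MAX_SIZE and, if storage-key
 * protection is requested, an access key in the 0-15 range.
 */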
2884 static int mem_op_validate_common(struct kvm_s390_mem_op *mop, u64 supported_flags)
2886 if (mop->flags & ~supported_flags || !mop->size)
2887 return -EINVAL;
2888 if (mop->size > MEM_OP_MAX_SIZE)
2889 return -E2BIG;
2890 if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
2899 static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2901 void __user *uaddr = (void __user *)mop->buf;
2902 enum gacc_mode acc_mode;
2903 void *tmpbuf = NULL;
2906 r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION |
2907 KVM_S390_MEMOP_F_CHECK_ONLY);
2911 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2912 tmpbuf = vmalloc(mop->size);
2913 if (!tmpbuf)
2914 return -ENOMEM;
2915 }
2917 srcu_idx = srcu_read_lock(&kvm->srcu);
2919 if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
2920 r = PGM_ADDRESSING;
2921 goto out_unlock;
2922 }
2924 acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE;
2925 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2926 r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key);
2927 goto out_unlock;
2928 }
2929 if (acc_mode == GACC_FETCH) {
2930 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2931 mop->size, GACC_FETCH, mop->key);
2932 if (r)
2933 goto out_unlock;
2934 if (copy_to_user(uaddr, tmpbuf, mop->size))
2935 r = -EFAULT;
2936 } else {
2937 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2938 r = -EFAULT;
2939 goto out_unlock;
2940 }
2941 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf,
2942 mop->size, GACC_STORE, mop->key);
2943 }
2945 out_unlock:
2946 srcu_read_unlock(&kvm->srcu, srcu_idx);
2947 vfree(tmpbuf);
2948 return r;
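/*
 * For the cmpxchg memop the old and new values are right-aligned inside
 * a big-endian __uint128_t: a 4-byte operation gives off_in_quad =
 * 16 - 4 = 12, so the operand occupies bytes 12..15 while the high
 * bytes stay zero.
 */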
2952 static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2954 void __user *uaddr = (void __user *)mop->buf;
2955 void __user *old_addr = (void __user *)mop->old_addr;
2958 char raw[sizeof(__uint128_t)];
2959 } old = { .quad = 0}, new = { .quad = 0 };
2960 unsigned int off_in_quad = sizeof(new) - mop->size;
2964 r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_SKEY_PROTECTION);
2968 * This validates off_in_quad. Checking that size is a power
2969 * of two is not necessary, as cmpxchg_guest_abs_with_key
2970 * takes care of that
2972 if (mop->size > sizeof(new))
2973 return -EINVAL;
2974 if (copy_from_user(&new.raw[off_in_quad], uaddr, mop->size))
2975 return -EFAULT;
2976 if (copy_from_user(&old.raw[off_in_quad], old_addr, mop->size))
2977 return -EFAULT;
2979 srcu_idx = srcu_read_lock(&kvm->srcu);
2981 if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
2982 r = PGM_ADDRESSING;
2983 goto out_unlock;
2984 }
2986 r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad,
2987 new.quad, mop->key, &success);
2988 if (!success && copy_to_user(old_addr, &old.raw[off_in_quad], mop->size))
2989 r = -EFAULT;
2991 out_unlock:
2992 srcu_read_unlock(&kvm->srcu, srcu_idx);
2993 return r;
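/*
 * A minimal userspace sketch of the absolute-read path dispatched
 * below; "vm_fd" and "buf" are hypothetical and error handling is
 * omitted:
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x1000,
 *		.size  = 256,
 *		.op    = KVM_S390_MEMOP_ABSOLUTE_READ,
 *		.buf   = (__u64)(unsigned long)buf,
 *	};
 *	ioctl(vm_fd, KVM_S390_MEM_OP, &op);
 */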
2996 static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
2999 * This is technically a heuristic only, if the kvm->lock is not
3000 * taken, it is not guaranteed that the vm is/remains non-protected.
3001 * This is ok from a kernel perspective, wrongdoing is detected
3002 * on the access, -EFAULT is returned and the vm may crash the
3003 * next time it accesses the memory in question.
3004 * There is no sane use case to do switching and a memop on two
3005 * different CPUs at the same time.
3007 if (kvm_s390_pv_get_handle(kvm))
3008 return -EINVAL;
3010 switch (mop->op) {
3011 case KVM_S390_MEMOP_ABSOLUTE_READ:
3012 case KVM_S390_MEMOP_ABSOLUTE_WRITE:
3013 return kvm_s390_vm_mem_op_abs(kvm, mop);
3014 case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
3015 return kvm_s390_vm_mem_op_cmpxchg(kvm, mop);
3021 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
3023 struct kvm *kvm = filp->private_data;
3024 void __user *argp = (void __user *)arg;
3025 struct kvm_device_attr attr;
3029 case KVM_S390_INTERRUPT: {
3030 struct kvm_s390_interrupt s390int;
3033 if (copy_from_user(&s390int, argp, sizeof(s390int)))
3035 r = kvm_s390_inject_vm(kvm, &s390int);
3038 case KVM_CREATE_IRQCHIP: {
3040 if (kvm->arch.use_irqchip)
3044 case KVM_SET_DEVICE_ATTR: {
3046 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3048 r = kvm_s390_vm_set_attr(kvm, &attr);
3051 case KVM_GET_DEVICE_ATTR: {
3053 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3055 r = kvm_s390_vm_get_attr(kvm, &attr);
3058 case KVM_HAS_DEVICE_ATTR: {
3060 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
3062 r = kvm_s390_vm_has_attr(kvm, &attr);
3065 case KVM_S390_GET_SKEYS: {
3066 struct kvm_s390_skeys args;
3069 if (copy_from_user(&args, argp,
3070 sizeof(struct kvm_s390_skeys)))
3072 r = kvm_s390_get_skeys(kvm, &args);
3075 case KVM_S390_SET_SKEYS: {
3076 struct kvm_s390_skeys args;
3079 if (copy_from_user(&args, argp,
3080 sizeof(struct kvm_s390_skeys)))
3082 r = kvm_s390_set_skeys(kvm, &args);
3085 case KVM_S390_GET_CMMA_BITS: {
3086 struct kvm_s390_cmma_log args;
3089 if (copy_from_user(&args, argp, sizeof(args)))
3091 mutex_lock(&kvm->slots_lock);
3092 r = kvm_s390_get_cmma_bits(kvm, &args);
3093 mutex_unlock(&kvm->slots_lock);
3095 r = copy_to_user(argp, &args, sizeof(args));
3101 case KVM_S390_SET_CMMA_BITS: {
3102 struct kvm_s390_cmma_log args;
3105 if (copy_from_user(&args, argp, sizeof(args)))
3107 mutex_lock(&kvm->slots_lock);
3108 r = kvm_s390_set_cmma_bits(kvm, &args);
3109 mutex_unlock(&kvm->slots_lock);
3112 case KVM_S390_PV_COMMAND: {
3113 struct kvm_pv_cmd args;
3115 /* protvirt means user cpu state */
3116 kvm_s390_set_user_cpu_state_ctrl(kvm);
3118 if (!is_prot_virt_host()) {
3122 if (copy_from_user(&args, argp, sizeof(args))) {
3130 /* must be called without kvm->lock */
3131 r = kvm_s390_handle_pv(kvm, &args);
3132 if (copy_to_user(argp, &args, sizeof(args))) {
3138 case KVM_S390_MEM_OP: {
3139 struct kvm_s390_mem_op mem_op;
3141 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3142 r = kvm_s390_vm_mem_op(kvm, &mem_op);
3147 case KVM_S390_ZPCI_OP: {
3148 struct kvm_s390_zpci_op args;
3151 if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3153 if (copy_from_user(&args, argp, sizeof(args))) {
3157 r = kvm_s390_pci_zpci_op(kvm, &args);
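/*
 * APXA (AP extended addressing) is reported by the QCI instruction: if
 * the AP instructions are available, query the configuration info and
 * return its apxa indication.
 */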
3167 static int kvm_s390_apxa_installed(void)
3169 struct ap_config_info info;
3171 if (ap_instructions_available()) {
3172 if (ap_qci(&info) == 0)
3180 * The format of the crypto control block (CRYCB) is specified in the 3 low
3181 * order bits of the CRYCB designation (CRYCBD) field as follows:
3182 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
3183 * AP extended addressing (APXA) facility is installed.
3184 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
3185 * Format 2: Both the APXA and MSAX3 facilities are installed.
3187 static void kvm_s390_set_crycb_format(struct kvm *kvm)
3189 kvm->arch.crypto.crycbd = virt_to_phys(kvm->arch.crypto.crycb);
3191 /* Clear the CRYCB format bits - i.e., set format 0 by default */
3192 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
3194 /* Check whether MSAX3 is installed */
3195 if (!test_kvm_facility(kvm, 76))
3198 if (kvm_s390_apxa_installed())
3199 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
3201 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
3205 * kvm_arch_crypto_set_masks
3207 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3209 * @apm: the mask identifying the accessible AP adapters
3210 * @aqm: the mask identifying the accessible AP domains
3211 * @adm: the mask identifying the accessible AP control domains
3213 * Set the masks that identify the adapters, domains and control domains to
3214 * which the KVM guest is granted access.
3216 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3219 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
3220 unsigned long *aqm, unsigned long *adm)
3222 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
3224 kvm_s390_vcpu_block_all(kvm);
3226 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
3227 case CRYCB_FORMAT2: /* APCB1 use 256 bits */
3228 memcpy(crycb->apcb1.apm, apm, 32);
3229 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
3230 apm[0], apm[1], apm[2], apm[3]);
3231 memcpy(crycb->apcb1.aqm, aqm, 32);
3232 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
3233 aqm[0], aqm[1], aqm[2], aqm[3]);
3234 memcpy(crycb->apcb1.adm, adm, 32);
3235 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
3236 adm[0], adm[1], adm[2], adm[3]);
3237 break;
3238 case CRYCB_FORMAT1:
3239 case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
3240 memcpy(crycb->apcb0.apm, apm, 8);
3241 memcpy(crycb->apcb0.aqm, aqm, 2);
3242 memcpy(crycb->apcb0.adm, adm, 2);
3243 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
3244 apm[0], *((unsigned short *)aqm),
3245 *((unsigned short *)adm));
3247 default: /* Can not happen */
3251 /* recreate the shadow crycb for each vcpu */
3252 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3253 kvm_s390_vcpu_unblock_all(kvm);
3255 EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
3258 * kvm_arch_crypto_clear_masks
3260 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3263 * Clear the masks that identify the adapters, domains and control domains to
3264 * which the KVM guest is granted access.
3266 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3269 void kvm_arch_crypto_clear_masks(struct kvm *kvm)
3271 kvm_s390_vcpu_block_all(kvm);
3273 memset(&kvm->arch.crypto.crycb->apcb0, 0,
3274 sizeof(kvm->arch.crypto.crycb->apcb0));
3275 memset(&kvm->arch.crypto.crycb->apcb1, 0,
3276 sizeof(kvm->arch.crypto.crycb->apcb1));
3278 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
3279 /* recreate the shadow crycb for each vcpu */
3280 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
3281 kvm_s390_vcpu_unblock_all(kvm);
3283 EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
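/*
 * The guest CPUID is the host CPUID with the version code forced to
 * 0xff which, by long-standing convention, marks a CPU running under a
 * hypervisor.
 */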
3285 static u64 kvm_s390_get_initial_cpuid(void)
3290 cpuid.version = 0xff;
3291 return *((u64 *) &cpuid);
3294 static void kvm_s390_crypto_init(struct kvm *kvm)
3296 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
3297 kvm_s390_set_crycb_format(kvm);
3298 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
3300 if (!test_kvm_facility(kvm, 76))
3303 /* Enable AES/DEA protected key functions by default */
3304 kvm->arch.crypto.aes_kw = 1;
3305 kvm->arch.crypto.dea_kw = 1;
3306 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
3307 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
3308 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
3309 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
3312 static void sca_dispose(struct kvm *kvm)
3314 if (kvm->arch.use_esca)
3315 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
3317 free_page((unsigned long)(kvm->arch.sca));
3318 kvm->arch.sca = NULL;
3321 void kvm_arch_free_vm(struct kvm *kvm)
3323 if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
3324 kvm_s390_pci_clear_list(kvm);
3326 __kvm_arch_free_vm(kvm);
3329 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
3331 gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
3333 char debug_name[16];
3334 static unsigned long sca_offset;
3337 #ifdef CONFIG_KVM_S390_UCONTROL
3338 if (type & ~KVM_VM_S390_UCONTROL)
3340 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
3347 rc = s390_enable_sie();
3353 if (!sclp.has_64bscao)
3354 alloc_flags |= GFP_DMA;
3355 rwlock_init(&kvm->arch.sca_lock);
3356 /* start with basic SCA */
3357 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
3360 mutex_lock(&kvm_lock);
3362 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
3364 kvm->arch.sca = (struct bsca_block *)
3365 ((char *) kvm->arch.sca + sca_offset);
3366 mutex_unlock(&kvm_lock);
3368 sprintf(debug_name, "kvm-%u", current->pid);
3370 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
3374 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
3375 kvm->arch.sie_page2 =
3376 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
3377 if (!kvm->arch.sie_page2)
3380 kvm->arch.sie_page2->kvm = kvm;
3381 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
3383 for (i = 0; i < kvm_s390_fac_size(); i++) {
3384 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
3385 (kvm_s390_fac_base[i] |
3386 kvm_s390_fac_ext[i]);
3387 kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
3388 kvm_s390_fac_base[i];
3390 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
3392 /* we are always in czam mode - even on pre z14 machines */
3393 set_kvm_facility(kvm->arch.model.fac_mask, 138);
3394 set_kvm_facility(kvm->arch.model.fac_list, 138);
3395 /* we emulate STHYI in kvm */
3396 set_kvm_facility(kvm->arch.model.fac_mask, 74);
3397 set_kvm_facility(kvm->arch.model.fac_list, 74);
3398 if (MACHINE_HAS_TLB_GUEST) {
3399 set_kvm_facility(kvm->arch.model.fac_mask, 147);
3400 set_kvm_facility(kvm->arch.model.fac_list, 147);
3403 if (css_general_characteristics.aiv && test_facility(65))
3404 set_kvm_facility(kvm->arch.model.fac_mask, 65);
3406 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
3407 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
3409 kvm->arch.model.uv_feat_guest.feat = 0;
3411 kvm_s390_crypto_init(kvm);
3413 if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
3414 mutex_lock(&kvm->lock);
3415 kvm_s390_pci_init_list(kvm);
3416 kvm_s390_vcpu_pci_enable_interp(kvm);
3417 mutex_unlock(&kvm->lock);
3420 mutex_init(&kvm->arch.float_int.ais_lock);
3421 spin_lock_init(&kvm->arch.float_int.lock);
3422 for (i = 0; i < FIRQ_LIST_COUNT; i++)
3423 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
3424 init_waitqueue_head(&kvm->arch.ipte_wq);
3425 mutex_init(&kvm->arch.ipte_mutex);
3427 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
3428 VM_EVENT(kvm, 3, "vm created with type %lu", type);
3430 if (type & KVM_VM_S390_UCONTROL) {
3431 kvm->arch.gmap = NULL;
3432 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
3434 if (sclp.hamax == U64_MAX)
3435 kvm->arch.mem_limit = TASK_SIZE_MAX;
3437 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
3439 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
3440 if (!kvm->arch.gmap)
3442 kvm->arch.gmap->private = kvm;
3443 kvm->arch.gmap->pfault_enabled = 0;
3446 kvm->arch.use_pfmfi = sclp.has_pfmfi;
3447 kvm->arch.use_skf = sclp.has_skey;
3448 spin_lock_init(&kvm->arch.start_stop_lock);
3449 kvm_s390_vsie_init(kvm);
3451 kvm_s390_gisa_init(kvm);
3452 INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
3453 kvm->arch.pv.set_aside = NULL;
3454 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
3458 free_page((unsigned long)kvm->arch.sie_page2);
3459 debug_unregister(kvm->arch.dbf);
3461 KVM_EVENT(3, "creation of vm failed: %d", rc);
3465 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
3469 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
3470 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
3471 kvm_s390_clear_local_irqs(vcpu);
3472 kvm_clear_async_pf_completion_queue(vcpu);
3473 if (!kvm_is_ucontrol(vcpu->kvm))
3474 sca_del_vcpu(vcpu);
3475 kvm_s390_update_topology_change_report(vcpu->kvm, 1);
3477 if (kvm_is_ucontrol(vcpu->kvm))
3478 gmap_remove(vcpu->arch.gmap);
3480 if (vcpu->kvm->arch.use_cmma)
3481 kvm_s390_vcpu_unsetup_cmma(vcpu);
3482 /* We can not hold the vcpu mutex here, we are already dying */
3483 if (kvm_s390_pv_cpu_get_handle(vcpu))
3484 kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
3485 free_page((unsigned long)(vcpu->arch.sie_block));
3488 void kvm_arch_destroy_vm(struct kvm *kvm)
3492 kvm_destroy_vcpus(kvm);
3494 kvm_s390_gisa_destroy(kvm);
3496 * We are already at the end of life and kvm->lock is not taken.
3497 * This is ok as the file descriptor is closed by now and nobody
3498 * can mess with the pv state.
3500 kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc);
3502 * Remove the mmu notifier only when the whole KVM VM is torn down,
3503 * and only if one was registered to begin with. If the VM is
3504 * currently not protected, but has previously been protected,
3505 * then it's possible that the notifier is still registered.
3507 if (kvm->arch.pv.mmu_notifier.ops)
3508 mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm);
3510 debug_unregister(kvm->arch.dbf);
3511 free_page((unsigned long)kvm->arch.sie_page2);
3512 if (!kvm_is_ucontrol(kvm))
3513 gmap_remove(kvm->arch.gmap);
3514 kvm_s390_destroy_adapters(kvm);
3515 kvm_s390_clear_float_irqs(kvm);
3516 kvm_s390_vsie_destroy(kvm);
3517 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
3520 /* Section: vcpu related */
3521 static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
3523 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
3524 if (!vcpu->arch.gmap)
3526 vcpu->arch.gmap->private = vcpu->kvm;
3531 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
3533 if (!kvm_s390_use_sca_entries())
3535 read_lock(&vcpu->kvm->arch.sca_lock);
3536 if (vcpu->kvm->arch.use_esca) {
3537 struct esca_block *sca = vcpu->kvm->arch.sca;
3539 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
3540 sca->cpu[vcpu->vcpu_id].sda = 0;
3542 struct bsca_block *sca = vcpu->kvm->arch.sca;
3544 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3545 sca->cpu[vcpu->vcpu_id].sda = 0;
3547 read_unlock(&vcpu->kvm->arch.sca_lock);
3550 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
3552 if (!kvm_s390_use_sca_entries()) {
3553 phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca);
3555 /* we still need the basic sca for the ipte control */
3556 vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3557 vcpu->arch.sie_block->scaol = sca_phys;
3560 read_lock(&vcpu->kvm->arch.sca_lock);
3561 if (vcpu->kvm->arch.use_esca) {
3562 struct esca_block *sca = vcpu->kvm->arch.sca;
3563 phys_addr_t sca_phys = virt_to_phys(sca);
3565 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3566 vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3567 vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK;
3568 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3569 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
3571 struct bsca_block *sca = vcpu->kvm->arch.sca;
3572 phys_addr_t sca_phys = virt_to_phys(sca);
3574 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block);
3575 vcpu->arch.sie_block->scaoh = sca_phys >> 32;
3576 vcpu->arch.sie_block->scaol = sca_phys;
3577 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
3579 read_unlock(&vcpu->kvm->arch.sca_lock);
3582 /* Basic SCA to Extended SCA data copy routines */
3583 static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
3586 d->sigp_ctrl.c = s->sigp_ctrl.c;
3587 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
3590 static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
3594 d->ipte_control = s->ipte_control;
3596 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
3597 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
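/*
 * Replace the basic SCA with an extended one. All vcpus are blocked and
 * the sca_lock is held for writing while the entries are copied and
 * every SIE block is rewired, so no CPU can enter SIE with a stale SCA
 * pointer.
 */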
3600 static int sca_switch_to_extended(struct kvm *kvm)
3602 struct bsca_block *old_sca = kvm->arch.sca;
3603 struct esca_block *new_sca;
3604 struct kvm_vcpu *vcpu;
3605 unsigned long vcpu_idx;
3607 phys_addr_t new_sca_phys;
3609 if (kvm->arch.use_esca)
3612 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
3616 new_sca_phys = virt_to_phys(new_sca);
3617 scaoh = new_sca_phys >> 32;
3618 scaol = new_sca_phys & ESCA_SCAOL_MASK;
3620 kvm_s390_vcpu_block_all(kvm);
3621 write_lock(&kvm->arch.sca_lock);
3623 sca_copy_b_to_e(new_sca, old_sca);
3625 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
3626 vcpu->arch.sie_block->scaoh = scaoh;
3627 vcpu->arch.sie_block->scaol = scaol;
3628 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
3630 kvm->arch.sca = new_sca;
3631 kvm->arch.use_esca = 1;
3633 write_unlock(&kvm->arch.sca_lock);
3634 kvm_s390_vcpu_unblock_all(kvm);
3636 free_page((unsigned long)old_sca);
3638 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
3639 old_sca, kvm->arch.sca);
3643 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
3647 if (!kvm_s390_use_sca_entries()) {
3648 if (id < KVM_MAX_VCPUS)
3652 if (id < KVM_S390_BSCA_CPU_SLOTS)
3654 if (!sclp.has_esca || !sclp.has_64bscao)
3657 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
3659 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
3662 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3663 static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3665 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
3666 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3667 vcpu->arch.cputm_start = get_tod_clock_fast();
3668 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3671 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3672 static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3674 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
3675 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3676 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3677 vcpu->arch.cputm_start = 0;
3678 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3681 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3682 static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3684 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3685 vcpu->arch.cputm_enabled = true;
3686 __start_cpu_timer_accounting(vcpu);
3689 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3690 static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3692 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3693 __stop_cpu_timer_accounting(vcpu);
3694 vcpu->arch.cputm_enabled = false;
3697 static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3699 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3700 __enable_cpu_timer_accounting(vcpu);
3704 static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3706 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3707 __disable_cpu_timer_accounting(vcpu);
3711 /* set the cpu timer - may only be called from the VCPU thread itself */
3712 void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3714 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3715 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
3716 if (vcpu->arch.cputm_enabled)
3717 vcpu->arch.cputm_start = get_tod_clock_fast();
3718 vcpu->arch.sie_block->cputm = cputm;
3719 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
3723 /* update and get the cpu timer - can also be called from other VCPU threads */
3724 __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3729 if (unlikely(!vcpu->arch.cputm_enabled))
3730 return vcpu->arch.sie_block->cputm;
3732 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3733 do {
3734 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3735 /*
3736 * If the writer would ever execute a read in the critical
3737 * section, e.g. in irq context, we have a deadlock.
3739 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3740 value = vcpu->arch.sie_block->cputm;
3741 /* if cputm_start is 0, accounting is being started/stopped */
3742 if (likely(vcpu->arch.cputm_start))
3743 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3744 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3749 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3752 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
3753 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3754 __start_cpu_timer_accounting(vcpu);
3758 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3761 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
3762 __stop_cpu_timer_accounting(vcpu);
3763 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
3767 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
3769 mutex_lock(&vcpu->kvm->lock);
3771 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
3772 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
3774 mutex_unlock(&vcpu->kvm->lock);
3775 if (!kvm_is_ucontrol(vcpu->kvm)) {
3776 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
3779 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3780 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
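/*
 * A PCKMO function code may only be offered to the guest if both the
 * guest CPU model (arch.model.subfuncs) and the host snapshot
 * (kvm_s390_available_subfunc) have the corresponding query bit set.
 */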
3783 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3785 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3786 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3791 static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3793 /* At least one ECC subfunction must be present */
3794 return kvm_has_pckmo_subfunc(kvm, 32) ||
3795 kvm_has_pckmo_subfunc(kvm, 33) ||
3796 kvm_has_pckmo_subfunc(kvm, 34) ||
3797 kvm_has_pckmo_subfunc(kvm, 40) ||
3798 kvm_has_pckmo_subfunc(kvm, 41);
3802 static bool kvm_has_pckmo_hmac(struct kvm *kvm)
3804 /* At least one HMAC subfunction must be present */
3805 return kvm_has_pckmo_subfunc(kvm, 118) ||
3806 kvm_has_pckmo_subfunc(kvm, 122);
3809 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3812 * If the AP instructions are not being interpreted and the MSAX3
3813 * facility is not configured for the guest, there is nothing to set up.
3815 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
3818 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
3819 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
3820 vcpu->arch.sie_block->eca &= ~ECA_APIE;
3821 vcpu->arch.sie_block->ecd &= ~(ECD_ECC | ECD_HMAC);
3823 if (vcpu->kvm->arch.crypto.apie)
3824 vcpu->arch.sie_block->eca |= ECA_APIE;
3826 /* Set up protected key support */
3827 if (vcpu->kvm->arch.crypto.aes_kw) {
3828 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
3829 /* ecc/hmac is also wrapped with AES key */
3830 if (kvm_has_pckmo_ecc(vcpu->kvm))
3831 vcpu->arch.sie_block->ecd |= ECD_ECC;
3832 if (kvm_has_pckmo_hmac(vcpu->kvm))
3833 vcpu->arch.sie_block->ecd |= ECD_HMAC;
3836 if (vcpu->kvm->arch.crypto.dea_kw)
3837 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
3840 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3842 free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo));
3843 vcpu->arch.sie_block->cbrlo = 0;
3846 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3848 void *cbrlo_page = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3853 vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page);
3857 static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3859 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3861 vcpu->arch.sie_block->ibc = model->ibc;
3862 if (test_kvm_facility(vcpu->kvm, 7))
3863 vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list);
3866 static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3871 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3872 CPUSTAT_SM |
3873 CPUSTAT_STOPPED);
3875 if (test_kvm_facility(vcpu->kvm, 78))
3876 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
3877 else if (test_kvm_facility(vcpu->kvm, 8))
3878 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
3880 kvm_s390_vcpu_setup_model(vcpu);
3882 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3883 if (MACHINE_HAS_ESOP)
3884 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
3885 if (test_kvm_facility(vcpu->kvm, 9))
3886 vcpu->arch.sie_block->ecb |= ECB_SRSI;
3887 if (test_kvm_facility(vcpu->kvm, 11))
3888 vcpu->arch.sie_block->ecb |= ECB_PTF;
3889 if (test_kvm_facility(vcpu->kvm, 73))
3890 vcpu->arch.sie_block->ecb |= ECB_TE;
3891 if (!kvm_is_ucontrol(vcpu->kvm))
3892 vcpu->arch.sie_block->ecb |= ECB_SPECI;
3894 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
3895 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
3896 if (test_kvm_facility(vcpu->kvm, 130))
3897 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3898 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
3899 if (sclp.has_cei)
3900 vcpu->arch.sie_block->eca |= ECA_CEI;
3901 if (sclp.has_ib)
3902 vcpu->arch.sie_block->eca |= ECA_IB;
3903 if (sclp.has_siif)
3904 vcpu->arch.sie_block->eca |= ECA_SII;
3905 if (sclp.has_sigpif)
3906 vcpu->arch.sie_block->eca |= ECA_SIGPI;
3907 if (test_kvm_facility(vcpu->kvm, 129)) {
3908 vcpu->arch.sie_block->eca |= ECA_VX;
3909 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3911 if (test_kvm_facility(vcpu->kvm, 139))
3912 vcpu->arch.sie_block->ecd |= ECD_MEF;
3913 if (test_kvm_facility(vcpu->kvm, 156))
3914 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
3915 if (vcpu->arch.sie_block->gd) {
3916 vcpu->arch.sie_block->eca |= ECA_AIV;
3917 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3918 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3920 vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC;
3921 vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb);
3923 if (sclp.has_kss)
3924 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
3925 else
3926 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
3928 if (vcpu->kvm->arch.use_cmma) {
3929 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3933 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3934 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
3936 vcpu->arch.sie_block->hpid = HPID_KVM;
3938 kvm_s390_vcpu_crypto_setup(vcpu);
3940 kvm_s390_vcpu_pci_setup(vcpu);
3942 mutex_lock(&vcpu->kvm->lock);
3943 if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3944 rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3946 kvm_s390_vcpu_unsetup_cmma(vcpu);
3948 mutex_unlock(&vcpu->kvm->lock);
3953 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3955 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3960 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
3962 struct sie_page *sie_page;
3965 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
3966 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
3970 vcpu->arch.sie_block = &sie_page->sie_block;
3971 vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb);
3973 /* the real guest size will always be smaller than msl */
3974 vcpu->arch.sie_block->mso = 0;
3975 vcpu->arch.sie_block->msl = sclp.hamax;
3977 vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
3978 spin_lock_init(&vcpu->arch.local_int.lock);
3979 vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
3980 seqcount_init(&vcpu->arch.cputm_seqcount);
3982 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3983 kvm_clear_async_pf_completion_queue(vcpu);
3984 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3985 KVM_SYNC_GPRS |
3986 KVM_SYNC_ACRS |
3987 KVM_SYNC_CRS |
3988 KVM_SYNC_ARCH0 |
3989 KVM_SYNC_PFAULT |
3990 KVM_SYNC_DIAG318;
3991 vcpu->arch.acrs_loaded = false;
3992 kvm_s390_set_prefix(vcpu, 0);
3993 if (test_kvm_facility(vcpu->kvm, 64))
3994 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3995 if (test_kvm_facility(vcpu->kvm, 82))
3996 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3997 if (test_kvm_facility(vcpu->kvm, 133))
3998 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3999 if (test_kvm_facility(vcpu->kvm, 156))
4000 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
4001 /* fprs can be synchronized via vrs, even if the guest has no vx. With
4002 * cpu_has_vx(), (load|store)_fpu_regs() will work with vrs format.
4003 */
4004 if (cpu_has_vx())
4005 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
4006 else
4007 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
4009 if (kvm_is_ucontrol(vcpu->kvm)) {
4010 rc = __kvm_ucontrol_vcpu_init(vcpu);
4012 goto out_free_sie_block;
4015 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
4016 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
4017 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
4019 rc = kvm_s390_vcpu_setup(vcpu);
4021 goto out_ucontrol_uninit;
4023 kvm_s390_update_topology_change_report(vcpu->kvm, 1);
4026 out_ucontrol_uninit:
4027 if (kvm_is_ucontrol(vcpu->kvm))
4028 gmap_remove(vcpu->arch.gmap);
4030 free_page((unsigned long)(vcpu->arch.sie_block));
4034 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
4036 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
4037 return kvm_s390_vcpu_has_irq(vcpu, 0);
4040 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
4042 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
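/*
 * prog20 encodes two independent reasons to keep a vcpu out of SIE:
 * PROG_BLOCK_SIE for longer-term blocking (e.g. while the SCA is
 * replaced) and PROG_REQUEST for a pending synchronous request;
 * kvm_s390_vcpu_sie_inhibited() below tests both.
 */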
4045 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
4047 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
4051 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
4053 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
4056 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
4058 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
4062 bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
4064 return atomic_read(&vcpu->arch.sie_block->prog20) &
4065 (PROG_BLOCK_SIE | PROG_REQUEST);
4068 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
4070 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
4074 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
4075 * If the CPU is not running (e.g. waiting as idle) the function will
4076 * return immediately. */
4077 void exit_sie(struct kvm_vcpu *vcpu)
4079 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
4080 kvm_s390_vsie_kick(vcpu);
4081 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
4085 /* Kick a guest cpu out of SIE to process a request synchronously */
4086 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
4088 __kvm_make_request(req, vcpu);
4089 kvm_s390_vcpu_request(vcpu);
4092 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
4095 struct kvm *kvm = gmap->private;
4096 struct kvm_vcpu *vcpu;
4097 unsigned long prefix;
4100 trace_kvm_s390_gmap_notifier(start, end, gmap_is_shadow(gmap));
4102 if (gmap_is_shadow(gmap))
4103 return;
4104 if (start >= 1UL << 31)
4105 /* We are only interested in prefix pages */
4106 return;
4107 kvm_for_each_vcpu(i, vcpu, kvm) {
4108 /* match against both prefix pages */
4109 prefix = kvm_s390_get_prefix(vcpu);
4110 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
4111 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
4113 kvm_s390_sync_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
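/*
 * avg_steal_timer is presumably accounted in CPU-timer units (4096 per
 * microsecond), making TICK_USEC << 12 one timer tick in the same
 * units; the quotient below then approximates the steal percentage of
 * the last tick, compared against the halt_poll_max_steal threshold.
 */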
4118 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
4120 /* do not poll with more than halt_poll_max_steal percent of steal time */
4121 if (get_lowcore()->avg_steal_timer * 100 / (TICK_USEC << 12) >=
4122 READ_ONCE(halt_poll_max_steal)) {
4123 vcpu->stat.halt_no_poll_steal++;
4124 return true;
4125 }
4126 return false;
4127 }
4129 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
4131 /* kvm common code refers to this, but never calls it */
4132 BUG();
4133 return 0;
4134 }
4136 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
4137 struct kvm_one_reg *reg)
4142 case KVM_REG_S390_TODPR:
4143 r = put_user(vcpu->arch.sie_block->todpr,
4144 (u32 __user *)reg->addr);
4146 case KVM_REG_S390_EPOCHDIFF:
4147 r = put_user(vcpu->arch.sie_block->epoch,
4148 (u64 __user *)reg->addr);
4150 case KVM_REG_S390_CPU_TIMER:
4151 r = put_user(kvm_s390_get_cpu_timer(vcpu),
4152 (u64 __user *)reg->addr);
4154 case KVM_REG_S390_CLOCK_COMP:
4155 r = put_user(vcpu->arch.sie_block->ckc,
4156 (u64 __user *)reg->addr);
4158 case KVM_REG_S390_PFTOKEN:
4159 r = put_user(vcpu->arch.pfault_token,
4160 (u64 __user *)reg->addr);
4162 case KVM_REG_S390_PFCOMPARE:
4163 r = put_user(vcpu->arch.pfault_compare,
4164 (u64 __user *)reg->addr);
4166 case KVM_REG_S390_PFSELECT:
4167 r = put_user(vcpu->arch.pfault_select,
4168 (u64 __user *)reg->addr);
4170 case KVM_REG_S390_PP:
4171 r = put_user(vcpu->arch.sie_block->pp,
4172 (u64 __user *)reg->addr);
4174 case KVM_REG_S390_GBEA:
4175 r = put_user(vcpu->arch.sie_block->gbea,
4176 (u64 __user *)reg->addr);
4185 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
4186 struct kvm_one_reg *reg)
4192 case KVM_REG_S390_TODPR:
4193 r = get_user(vcpu->arch.sie_block->todpr,
4194 (u32 __user *)reg->addr);
4196 case KVM_REG_S390_EPOCHDIFF:
4197 r = get_user(vcpu->arch.sie_block->epoch,
4198 (u64 __user *)reg->addr);
4200 case KVM_REG_S390_CPU_TIMER:
4201 r = get_user(val, (u64 __user *)reg->addr);
4203 kvm_s390_set_cpu_timer(vcpu, val);
4205 case KVM_REG_S390_CLOCK_COMP:
4206 r = get_user(vcpu->arch.sie_block->ckc,
4207 (u64 __user *)reg->addr);
4209 case KVM_REG_S390_PFTOKEN:
4210 r = get_user(vcpu->arch.pfault_token,
4211 (u64 __user *)reg->addr);
4212 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4213 kvm_clear_async_pf_completion_queue(vcpu);
4215 case KVM_REG_S390_PFCOMPARE:
4216 r = get_user(vcpu->arch.pfault_compare,
4217 (u64 __user *)reg->addr);
4219 case KVM_REG_S390_PFSELECT:
4220 r = get_user(vcpu->arch.pfault_select,
4221 (u64 __user *)reg->addr);
4223 case KVM_REG_S390_PP:
4224 r = get_user(vcpu->arch.sie_block->pp,
4225 (u64 __user *)reg->addr);
4227 case KVM_REG_S390_GBEA:
4228 r = get_user(vcpu->arch.sie_block->gbea,
4229 (u64 __user *)reg->addr);
4238 static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
4240 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
4241 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
4242 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
4244 kvm_clear_async_pf_completion_queue(vcpu);
4245 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
4246 kvm_s390_vcpu_stop(vcpu);
4247 kvm_s390_clear_local_irqs(vcpu);
4250 static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
4252 /* Initial reset is a superset of the normal reset */
4253 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
4256 * This equals initial cpu reset in pop, but we don't switch to ESA.
4257 * We do not only reset the internal data, but also ...
4259 vcpu->arch.sie_block->gpsw.mask = 0;
4260 vcpu->arch.sie_block->gpsw.addr = 0;
4261 kvm_s390_set_prefix(vcpu, 0);
4262 kvm_s390_set_cpu_timer(vcpu, 0);
4263 vcpu->arch.sie_block->ckc = 0;
4264 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
4265 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
4266 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
4268 /* ... the data in sync regs */
4269 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
4270 vcpu->run->s.regs.ckc = 0;
4271 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
4272 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
4273 vcpu->run->psw_addr = 0;
4274 vcpu->run->psw_mask = 0;
4275 vcpu->run->s.regs.todpr = 0;
4276 vcpu->run->s.regs.cputm = 0;
4277 vcpu->run->s.regs.ckc = 0;
4278 vcpu->run->s.regs.pp = 0;
4279 vcpu->run->s.regs.gbea = 1;
4280 vcpu->run->s.regs.fpc = 0;
4282 * Do not reset these registers in the protected case, as some of
4283 * them are overlaid and they are not accessible in this case
4286 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4287 vcpu->arch.sie_block->gbea = 1;
4288 vcpu->arch.sie_block->pp = 0;
4289 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4290 vcpu->arch.sie_block->todpr = 0;
4294 static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
4296 struct kvm_sync_regs *regs = &vcpu->run->s.regs;
4298 /* Clear reset is a superset of the initial reset */
4299 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4301 memset(&regs->gprs, 0, sizeof(regs->gprs));
4302 memset(&regs->vrs, 0, sizeof(regs->vrs));
4303 memset(&regs->acrs, 0, sizeof(regs->acrs));
4304 memset(&regs->gscb, 0, sizeof(regs->gscb));
4307 regs->etoken_extension = 0;
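/*
 * Illustrative userspace sketch (standard vcpu ioctls, not part of
 * this file): the three resets above form a strict hierarchy, each a
 * superset of the previous one. Error handling is omitted.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void demo_vcpu_resets(int vcpu_fd)
{
	/* clears async pfaults and local irqs, may stop the cpu */
	ioctl(vcpu_fd, KVM_S390_NORMAL_RESET, 0);
	/* additionally zeroes PSW, prefix, timers and control registers */
	ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);
	/* additionally zeroes gprs, vrs, acrs, gscb and the etoken */
	ioctl(vcpu_fd, KVM_S390_CLEAR_RESET, 0);
}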
4310 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4313 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
4318 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4321 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
4326 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4327 struct kvm_sregs *sregs)
4331 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
4332 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
4338 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4339 struct kvm_sregs *sregs)
4343 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
4344 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
4350 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4356 vcpu->run->s.regs.fpc = fpu->fpc;
4358 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
4359 (freg_t *) fpu->fprs);
4361 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
4367 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
4372 convert_vx_to_fp((freg_t *) fpu->fprs,
4373 (__vector128 *) vcpu->run->s.regs.vrs);
4375 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
4376 fpu->fpc = vcpu->run->s.regs.fpc;
4382 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
4386 if (!is_vcpu_stopped(vcpu))
4389 vcpu->run->psw_mask = psw.mask;
4390 vcpu->run->psw_addr = psw.addr;
4395 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
4396 struct kvm_translation *tr)
4398 return -EINVAL; /* not implemented yet */
4401 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
4402 KVM_GUESTDBG_USE_HW_BP | \
4403 KVM_GUESTDBG_ENABLE)
4405 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
4406 struct kvm_guest_debug *dbg)
4412 vcpu->guest_debug = 0;
4413 kvm_s390_clear_bp_data(vcpu);
4415 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
4419 if (!sclp.has_gpere) {
4424 if (dbg->control & KVM_GUESTDBG_ENABLE) {
4425 vcpu->guest_debug = dbg->control;
4426 /* enforce guest PER */
4427 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
4429 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
4430 rc = kvm_s390_import_bp_data(vcpu, dbg);
4432 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
4433 vcpu->arch.guestdbg.last_bp = 0;
4437 vcpu->guest_debug = 0;
4438 kvm_s390_clear_bp_data(vcpu);
4439 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
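/*
 * Illustrative userspace sketch (standard KVM_SET_GUEST_DEBUG usage,
 * not part of this file): only the flags in VALID_GUESTDBG_FLAGS are
 * accepted, and enabling debugging requires the SCLP GPERE facility.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_singlestep(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}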
4447 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4448 struct kvm_mp_state *mp_state)
4454 /* CHECK_STOP and LOAD are not supported yet */
4455 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
4456 KVM_MP_STATE_OPERATING;
4462 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
4463 struct kvm_mp_state *mp_state)
4469 /* user space knows about this interface - let it control the state */
4470 kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);
4472 switch (mp_state->mp_state) {
4473 case KVM_MP_STATE_STOPPED:
4474 rc = kvm_s390_vcpu_stop(vcpu);
4476 case KVM_MP_STATE_OPERATING:
4477 rc = kvm_s390_vcpu_start(vcpu);
4479 case KVM_MP_STATE_LOAD:
4480 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
4484 rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
4486 case KVM_MP_STATE_CHECK_STOP:
4487 fallthrough; /* CHECK_STOP and LOAD are not supported yet */
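/*
 * Illustrative userspace sketch (standard KVM_SET_MP_STATE usage, not
 * part of this file). Note the side effect above: the first use of
 * this ioctl switches the VM to user-controlled cpu state.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_vcpu_stopped(int vcpu_fd, int stopped)
{
	struct kvm_mp_state st = {
		.mp_state = stopped ? KVM_MP_STATE_STOPPED
				    : KVM_MP_STATE_OPERATING,
	};

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
}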
4496 static bool ibs_enabled(struct kvm_vcpu *vcpu)
4498 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
4501 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
4504 kvm_s390_vcpu_request_handled(vcpu);
4505 if (!kvm_request_pending(vcpu))
4508 * If the guest prefix changed, re-arm the ipte notifier for the
4509 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
4510 * This ensures that the ipte instruction for this request has
4511 * already finished. We might race against a second unmapper that
4512 * wants to set the blocking bit. Let's just retry the request loop.
4514 if (kvm_check_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu)) {
4516 rc = gmap_mprotect_notify(vcpu->arch.gmap,
4517 kvm_s390_get_prefix(vcpu),
4518 PAGE_SIZE * 2, PROT_WRITE);
4520 kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
4526 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
4527 vcpu->arch.sie_block->ihcpu = 0xffff;
4531 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
4532 if (!ibs_enabled(vcpu)) {
4533 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
4534 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
4539 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
4540 if (ibs_enabled(vcpu)) {
4541 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
4542 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
4547 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
4548 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
4552 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
4554 * Disable CMM virtualization; we will emulate the ESSA
4555 * instruction manually, in order to provide additional
4556 * functionalities needed for live migration.
4558 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
4562 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
4564 * Re-enable CMM virtualization if CMMA is available and
4565 * CMM has been used.
4567 if ((vcpu->kvm->arch.use_cmma) &&
4568 (vcpu->kvm->mm->context.uses_cmm))
4569 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
4573 /* we left the vsie handler, nothing to do, just clear the request */
4574 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
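/*
 * Minimal standalone model (illustrative, plain C11 atomics instead of
 * the kernel primitives) of the pattern used above: kvm_check_request()
 * tests and clears one request bit atomically, so a request raised by
 * another CPU is consumed exactly once per loop iteration.
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool check_request(atomic_ulong *reqs, unsigned int bit)
{
	unsigned long mask = 1UL << bit;

	if (!(atomic_load(reqs) & mask))	/* cheap test first */
		return false;
	/* clear the bit and report whether we were the one to consume it */
	return atomic_fetch_and(reqs, ~mask) & mask;
}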
4579 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4581 struct kvm_vcpu *vcpu;
4582 union tod_clock clk;
4587 store_tod_clock_ext(&clk);
4589 kvm->arch.epoch = gtod->tod - clk.tod;
4591 if (test_kvm_facility(kvm, 139)) {
4592 kvm->arch.epdx = gtod->epoch_idx - clk.ei;
4593 if (kvm->arch.epoch > gtod->tod)
4594 kvm->arch.epdx -= 1;
4597 kvm_s390_vcpu_block_all(kvm);
4598 kvm_for_each_vcpu(i, vcpu, kvm) {
4599 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
4600 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
4603 kvm_s390_vcpu_unblock_all(kvm);
4607 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
4609 if (!mutex_trylock(&kvm->lock))
4611 __kvm_s390_set_tod_clock(kvm, gtod);
4612 mutex_unlock(&kvm->lock);
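/*
 * Worked sketch (illustrative, plain integer types instead of
 * union tod_clock) of the epoch arithmetic above: the guest TOD is the
 * host TOD plus a per-VM epoch, so epoch = target - host, computed
 * modulo 2^64. With the multiple-epoch facility (139) the epoch index
 * takes a borrow when that subtraction wraps.
 */
#include <stdint.h>

struct epoch_diff {
	uint64_t epoch;	/* guest_tod - host_tod (mod 2^64) */
	uint8_t epdx;	/* epoch index difference */
};

static struct epoch_diff compute_epoch(uint64_t target_tod, uint8_t target_idx,
				       uint64_t host_tod, uint8_t host_idx)
{
	struct epoch_diff d;

	d.epoch = target_tod - host_tod;
	d.epdx = target_idx - host_idx;
	if (d.epoch > target_tod)	/* subtraction wrapped: borrow */
		d.epdx -= 1;
	return d;
}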
4616 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
4617 unsigned long token)
4619 struct kvm_s390_interrupt inti;
4620 struct kvm_s390_irq irq;
4623 irq.u.ext.ext_params2 = token;
4624 irq.type = KVM_S390_INT_PFAULT_INIT;
4625 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
4627 inti.type = KVM_S390_INT_PFAULT_DONE;
4628 inti.parm64 = token;
4629 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
4633 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
4634 struct kvm_async_pf *work)
4636 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
4637 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
4642 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
4643 struct kvm_async_pf *work)
4645 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
4646 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
4649 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
4650 struct kvm_async_pf *work)
4652 /* s390 will always inject the page directly */
4655 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
4658 * s390 will always inject the page directly,
4659 * but we still want check_async_completion to clean up
4664 static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
4667 struct kvm_arch_async_pf arch;
4669 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4671 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
4672 vcpu->arch.pfault_compare)
4674 if (psw_extint_disabled(vcpu))
4676 if (kvm_s390_vcpu_has_irq(vcpu, 0))
4678 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
4680 if (!vcpu->arch.gmap->pfault_enabled)
4683 hva = gfn_to_hva(vcpu->kvm, current->thread.gmap_teid.addr);
4684 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
4687 return kvm_setup_async_pf(vcpu, current->thread.gmap_teid.addr * PAGE_SIZE, hva, &arch);
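/*
 * Illustrative reduction (not part of this file) of the gating test
 * above: the guest programs a select mask and a compare value via the
 * pfault handshake, and async page faults may only be injected while
 * (guest PSW mask & select) == compare.
 */
#include <stdbool.h>
#include <stdint.h>

static bool pfault_allowed(uint64_t psw_mask, uint64_t select, uint64_t compare)
{
	return (psw_mask & select) == compare;
}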
4690 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
4695 * On s390 notifications for arriving pages will be delivered directly
4696 * to the guest but the housekeeping for completed pfaults is
4697 * handled outside the worker.
4699 kvm_check_async_pf_completion(vcpu);
4701 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
4702 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
4707 if (!kvm_is_ucontrol(vcpu->kvm)) {
4708 rc = kvm_s390_deliver_pending_interrupts(vcpu);
4709 if (rc || guestdbg_exit_pending(vcpu))
4713 rc = kvm_s390_handle_requests(vcpu);
4717 if (guestdbg_enabled(vcpu)) {
4718 kvm_s390_backup_guest_per_regs(vcpu);
4719 kvm_s390_patch_guest_per_regs(vcpu);
4722 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
4724 vcpu->arch.sie_block->icptcode = 0;
4725 current->thread.gmap_int_code = 0;
4726 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4727 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
4728 trace_kvm_s390_sie_enter(vcpu, cpuflags);
4733 static int vcpu_post_run_addressing_exception(struct kvm_vcpu *vcpu)
4735 struct kvm_s390_pgm_info pgm_info = {
4736 .code = PGM_ADDRESSING,
4741 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4742 trace_kvm_s390_sie_fault(vcpu);
4745 * We want to inject an addressing exception, which is defined as a
4746 * suppressing or terminating exception. However, since we came here
4747 * by a DAT access exception, the PSW still points to the faulting
4748 * instruction since DAT exceptions are nullifying. So we've got
4749 * to look up the current opcode to get the length of the instruction
4750 * to be able to forward the PSW.
4752 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
4753 ilen = insn_length(opcode);
4757 /* Instruction-Fetching Exceptions - we can't detect the ilen.
4758 * Forward by arbitrary ilc, injection will take care of
4759 * nullification if necessary.
4761 pgm_info = vcpu->arch.pgm;
4764 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
4765 kvm_s390_forward_psw(vcpu, ilen);
4766 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
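/*
 * Standalone sketch (illustrative) of the insn_length() computation
 * used above: on s390 the two most significant bits of the first
 * opcode byte encode the instruction length -- 00 -> 2 bytes,
 * 01/10 -> 4 bytes, 11 -> 6 bytes -- which is exactly what forwarding
 * the PSW past the faulting instruction needs.
 */
#include <stdint.h>

static unsigned int insn_len(uint8_t opcode)
{
	switch (opcode >> 6) {
	case 0:
		return 2;
	case 1:
	case 2:
		return 4;
	default:
		return 6;
	}
}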
4769 static int vcpu_post_run_handle_fault(struct kvm_vcpu *vcpu)
4771 unsigned int flags = 0;
4772 unsigned long gaddr;
4775 gaddr = current->thread.gmap_teid.addr * PAGE_SIZE;
4776 if (kvm_s390_cur_gmap_fault_is_write())
4777 flags = FAULT_FLAG_WRITE;
4779 switch (current->thread.gmap_int_code & PGM_INT_CODE_MASK) {
4781 vcpu->stat.exit_null++;
4783 case PGM_NON_SECURE_STORAGE_ACCESS:
4784 KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
4785 "Unexpected program interrupt 0x%x, TEID 0x%016lx",
4786 current->thread.gmap_int_code, current->thread.gmap_teid.val);
4788 * This is normal operation; a page belonging to a protected
4789 * guest has not been imported yet. Try to import the page into
4790 * the protected guest.
4792 if (gmap_convert_to_secure(vcpu->arch.gmap, gaddr) == -EINVAL)
4793 send_sig(SIGSEGV, current, 0);
4795 case PGM_SECURE_STORAGE_ACCESS:
4796 case PGM_SECURE_STORAGE_VIOLATION:
4797 KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
4798 "Unexpected program interrupt 0x%x, TEID 0x%016lx",
4799 current->thread.gmap_int_code, current->thread.gmap_teid.val);
4801 * This can happen after a reboot with asynchronous teardown;
4802 * the new guest (normal or protected) will run on top of the
4803 * previous protected guest. The old pages need to be destroyed
4804 * so the new guest can use them.
4806 if (gmap_destroy_page(vcpu->arch.gmap, gaddr)) {
4808 * Either KVM messed up the secure guest mapping or the
4809 * same page is mapped into multiple secure guests.
4811 * This exception is only triggered when a guest 2 is
4812 * running and can therefore never occur in kernel context.
4815 pr_warn_ratelimited("Secure storage violation (%x) in task: %s, pid %d\n",
4816 current->thread.gmap_int_code, current->comm,
4818 send_sig(SIGSEGV, current, 0);
4821 case PGM_PROTECTION:
4822 case PGM_SEGMENT_TRANSLATION:
4823 case PGM_PAGE_TRANSLATION:
4825 case PGM_REGION_FIRST_TRANS:
4826 case PGM_REGION_SECOND_TRANS:
4827 case PGM_REGION_THIRD_TRANS:
4828 KVM_BUG(current->thread.gmap_teid.as != PSW_BITS_AS_PRIMARY, vcpu->kvm,
4829 "Unexpected program interrupt 0x%x, TEID 0x%016lx",
4830 current->thread.gmap_int_code, current->thread.gmap_teid.val);
4831 if (vcpu->arch.gmap->pfault_enabled) {
4832 rc = gmap_fault(vcpu->arch.gmap, gaddr, flags | FAULT_FLAG_RETRY_NOWAIT);
4834 return vcpu_post_run_addressing_exception(vcpu);
4835 if (rc == -EAGAIN) {
4836 trace_kvm_s390_major_guest_pfault(vcpu);
4837 if (kvm_arch_setup_async_pf(vcpu))
4839 vcpu->stat.pfault_sync++;
4844 rc = gmap_fault(vcpu->arch.gmap, gaddr, flags);
4845 if (rc == -EFAULT) {
4846 if (kvm_is_ucontrol(vcpu->kvm)) {
4847 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4848 vcpu->run->s390_ucontrol.trans_exc_code = gaddr;
4849 vcpu->run->s390_ucontrol.pgm_code = 0x10;
4852 return vcpu_post_run_addressing_exception(vcpu);
4856 KVM_BUG(1, vcpu->kvm, "Unexpected program interrupt 0x%x, TEID 0x%016lx",
4857 current->thread.gmap_int_code, current->thread.gmap_teid.val);
4858 send_sig(SIGSEGV, current, 0);
4864 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
4866 struct mcck_volatile_info *mcck_info;
4867 struct sie_page *sie_page;
4870 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
4871 vcpu->arch.sie_block->icptcode);
4872 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4874 if (guestdbg_enabled(vcpu))
4875 kvm_s390_restore_guest_per_regs(vcpu);
4877 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4878 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
4880 if (exit_reason == -EINTR) {
4881 VCPU_EVENT(vcpu, 3, "%s", "machine check");
4882 sie_page = container_of(vcpu->arch.sie_block,
4883 struct sie_page, sie_block);
4884 mcck_info = &sie_page->mcck_info;
4885 kvm_s390_reinject_machine_check(vcpu, mcck_info);
4889 if (vcpu->arch.sie_block->icptcode > 0) {
4890 rc = kvm_handle_sie_intercept(vcpu);
4892 if (rc != -EOPNOTSUPP)
4894 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
4895 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4896 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4897 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4901 return vcpu_post_run_handle_fault(vcpu);
4904 #define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
4905 static int __vcpu_run(struct kvm_vcpu *vcpu)
4907 int rc, exit_reason;
4908 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
4911 * We try to hold kvm->srcu during most of vcpu_run (except when
4912 * running the guest), so that memslots (and other stuff) are protected
4914 kvm_vcpu_srcu_read_lock(vcpu);
4917 rc = vcpu_pre_run(vcpu);
4918 if (rc || guestdbg_exit_pending(vcpu))
4921 kvm_vcpu_srcu_read_unlock(vcpu);
4923 * As PF_VCPU will be used in the fault handler, there should be no
4924 * uaccess between guest_enter and guest_exit.
4926 local_irq_disable();
4927 guest_enter_irqoff();
4928 __disable_cpu_timer_accounting(vcpu);
4930 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4931 memcpy(sie_page->pv_grregs,
4932 vcpu->run->s.regs.gprs,
4933 sizeof(sie_page->pv_grregs));
4935 exit_reason = sie64a(vcpu->arch.sie_block,
4936 vcpu->run->s.regs.gprs,
4937 vcpu->arch.gmap->asce);
4938 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4939 memcpy(vcpu->run->s.regs.gprs,
4940 sie_page->pv_grregs,
4941 sizeof(sie_page->pv_grregs));
4943 * We're not allowed to inject interrupts on intercepts
4944 * that leave the guest state in an "in-between" state
4945 * where the next SIE entry will do a continuation.
4946 * Fence interrupts in our "internal" PSW.
4948 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4949 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4950 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4953 local_irq_disable();
4954 __enable_cpu_timer_accounting(vcpu);
4955 guest_exit_irqoff();
4957 kvm_vcpu_srcu_read_lock(vcpu);
4959 rc = vcpu_post_run(vcpu, exit_reason);
4960 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
4962 kvm_vcpu_srcu_read_unlock(vcpu);
4966 static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
4968 struct kvm_run *kvm_run = vcpu->run;
4969 struct runtime_instr_cb *riccb;
4972 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
4973 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
4974 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4975 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
4976 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4977 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4978 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4979 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4981 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4982 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4983 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4984 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
4985 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4986 kvm_clear_async_pf_completion_queue(vcpu);
4988 if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
4989 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
4990 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
4991 VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
4994 * If userspace sets the riccb (e.g. after migration) to a valid state,
4995 * we should enable RI here instead of doing the lazy enablement.
4997 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
4998 test_kvm_facility(vcpu->kvm, 64) &&
5000 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
5001 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
5002 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
5005 * If userspace sets the gscb (e.g. after migration) to non-zero,
5006 * we should enable GS here instead of doing the lazy enablement.
5008 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
5009 test_kvm_facility(vcpu->kvm, 133) &&
5011 !vcpu->arch.gs_enabled) {
5012 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
5013 vcpu->arch.sie_block->ecb |= ECB_GS;
5014 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
5015 vcpu->arch.gs_enabled = 1;
5017 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
5018 test_kvm_facility(vcpu->kvm, 82)) {
5019 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
5020 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
5022 if (MACHINE_HAS_GS) {
5024 local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
5025 if (current->thread.gs_cb) {
5026 vcpu->arch.host_gscb = current->thread.gs_cb;
5027 save_gs_cb(vcpu->arch.host_gscb);
5029 if (vcpu->arch.gs_enabled) {
5030 current->thread.gs_cb = (struct gs_cb *)
5031 &vcpu->run->s.regs.gscb;
5032 restore_gs_cb(current->thread.gs_cb);
5036 /* SIE will load etoken directly from SDNX and therefore kvm_run */
5039 static void sync_regs(struct kvm_vcpu *vcpu)
5041 struct kvm_run *kvm_run = vcpu->run;
5043 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
5044 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
5045 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
5046 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
5047 /* some control register changes require a tlb flush */
5048 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
5050 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
5051 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
5052 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
5054 save_access_regs(vcpu->arch.host_acrs);
5055 restore_access_regs(vcpu->run->s.regs.acrs);
5056 vcpu->arch.acrs_loaded = true;
5057 kvm_s390_fpu_load(vcpu->run);
5058 /* Sync fmt2 only data */
5059 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
5060 sync_regs_fmt2(vcpu);
5063 * In several places we have to modify our internal view to
5064 * not do things that are disallowed by the ultravisor. For
5065 * example we must not inject interrupts after specific exits
5066 * (e.g. 112 prefix page not secure). We do this by turning
5067 * off the machine check, external and I/O interrupt bits
5068 * of our PSW copy. To avoid getting validity intercepts, we
5069 * accept only the condition code from userspace.
5071 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
5072 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
5076 kvm_run->kvm_dirty_regs = 0;
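/*
 * Illustrative userspace view (standard KVM_CAP_SYNC_REGS usage, not
 * part of this file) of the protocol served by sync_regs(): write the
 * shared kvm_run area, flag the touched register group in
 * kvm_dirty_regs, and the next KVM_RUN picks it up -- no
 * KVM_SET_ONE_REG round trip needed.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void set_prefix_via_sync_regs(int vcpu_fd, struct kvm_run *run,
				     __u32 new_prefix)
{
	run->s.regs.prefix = new_prefix;
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;	/* consumed by sync_regs() */
	ioctl(vcpu_fd, KVM_RUN, 0);
}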
5079 static void store_regs_fmt2(struct kvm_vcpu *vcpu)
5081 struct kvm_run *kvm_run = vcpu->run;
5083 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
5084 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
5085 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
5086 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
5087 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
5088 if (MACHINE_HAS_GS) {
5090 local_ctl_set_bit(2, CR2_GUARDED_STORAGE_BIT);
5091 if (vcpu->arch.gs_enabled)
5092 save_gs_cb(current->thread.gs_cb);
5093 current->thread.gs_cb = vcpu->arch.host_gscb;
5094 restore_gs_cb(vcpu->arch.host_gscb);
5095 if (!vcpu->arch.host_gscb)
5096 local_ctl_clear_bit(2, CR2_GUARDED_STORAGE_BIT);
5097 vcpu->arch.host_gscb = NULL;
5100 /* SIE will save etoken directly into SDNX and therefore kvm_run */
5103 static void store_regs(struct kvm_vcpu *vcpu)
5105 struct kvm_run *kvm_run = vcpu->run;
5107 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
5108 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
5109 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
5110 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
5111 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
5112 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
5113 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
5114 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
5115 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
5116 save_access_regs(vcpu->run->s.regs.acrs);
5117 restore_access_regs(vcpu->arch.host_acrs);
5118 vcpu->arch.acrs_loaded = false;
5119 kvm_s390_fpu_store(vcpu->run);
5120 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
5121 store_regs_fmt2(vcpu);
5124 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
5126 struct kvm_run *kvm_run = vcpu->run;
5127 DECLARE_KERNEL_FPU_ONSTACK32(fpu);
5131 * Running a VM while dumping always has the potential to
5132 * produce inconsistent dump data. But for PV vcpus a SIE
5133 * entry while dumping could also lead to a fatal validity
5134 * intercept which we absolutely want to avoid.
5136 if (vcpu->kvm->arch.pv.dumping)
5139 if (!vcpu->wants_to_run)
5142 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
5143 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
5148 if (guestdbg_exit_pending(vcpu)) {
5149 kvm_s390_prepare_debug_exit(vcpu);
5154 kvm_sigset_activate(vcpu);
5157 * no need to check the return value of vcpu_start as it can only have
5158 * an error for protvirt, but protvirt means user cpu state
5160 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
5161 kvm_s390_vcpu_start(vcpu);
5162 } else if (is_vcpu_stopped(vcpu)) {
5163 pr_err_ratelimited("can't run stopped vcpu %d\n",
5169 kernel_fpu_begin(&fpu, KERNEL_FPC | KERNEL_VXR);
5171 enable_cpu_timer_accounting(vcpu);
5174 rc = __vcpu_run(vcpu);
5176 if (signal_pending(current) && !rc) {
5177 kvm_run->exit_reason = KVM_EXIT_INTR;
5181 if (guestdbg_exit_pending(vcpu) && !rc) {
5182 kvm_s390_prepare_debug_exit(vcpu);
5186 if (rc == -EREMOTE) {
5187 /* userspace support is needed, kvm_run has been prepared */
5191 disable_cpu_timer_accounting(vcpu);
5193 kernel_fpu_end(&fpu, KERNEL_FPC | KERNEL_VXR);
5195 kvm_sigset_deactivate(vcpu);
5197 vcpu->stat.exit_userspace++;
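/*
 * Illustrative userspace counterpart (standard API usage, not part of
 * this file) of the ioctl above: -EINTR surfaces as exit_reason
 * KVM_EXIT_INTR, while -EREMOTE means kvm_run was prepared for
 * userspace handling (e.g. KVM_EXIT_S390_SIEIC).
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void run_until_real_exit(int vcpu_fd, struct kvm_run *run)
{
	do {
		ioctl(vcpu_fd, KVM_RUN, 0);
		/* KVM_EXIT_INTR just means a host signal; re-enter */
	} while (run->exit_reason == KVM_EXIT_INTR);
}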
5204 * store status at address
5205 * we have two special cases:
5206 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
5207 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
5209 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
5211 unsigned char archmode = 1;
5212 freg_t fprs[NUM_FPRS];
5217 px = kvm_s390_get_prefix(vcpu);
5218 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
5219 if (write_guest_abs(vcpu, 163, &archmode, 1))
5222 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
5223 if (write_guest_real(vcpu, 163, &archmode, 1))
5227 gpa -= __LC_FPREGS_SAVE_AREA;
5229 /* manually convert vector registers if necessary */
5231 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
5232 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5235 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
5236 vcpu->run->s.regs.fprs, 128);
5238 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
5239 vcpu->run->s.regs.gprs, 128);
5240 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
5241 &vcpu->arch.sie_block->gpsw, 16);
5242 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
5244 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
5245 &vcpu->run->s.regs.fpc, 4);
5246 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
5247 &vcpu->arch.sie_block->todpr, 4);
5248 cputm = kvm_s390_get_cpu_timer(vcpu);
5249 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
5251 clkcomp = vcpu->arch.sie_block->ckc >> 8;
5252 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
5254 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
5255 &vcpu->run->s.regs.acrs, 64);
5256 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
5257 &vcpu->arch.sie_block->gcr, 128);
5258 return rc ? -EFAULT : 0;
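/*
 * Side note as code (illustrative): the architected status area stores
 * only bits 0-55 of the clock comparator, which is why the code above
 * shifts ckc right by 8 before writing it out.
 */
#include <stdint.h>

static uint64_t clock_comp_save_format(uint64_t ckc)
{
	return ckc >> 8;	/* drop the low 8 bits, per the save-area layout */
}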
5261 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
5264 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
5265 * switch in the run ioctl. Let's update our copies before we save
5266 * them into the save area.
5268 kvm_s390_fpu_store(vcpu->run);
5269 save_access_regs(vcpu->run->s.regs.acrs);
5271 return kvm_s390_store_status_unloaded(vcpu, addr);
5274 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5276 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
5277 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
5280 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
5283 struct kvm_vcpu *vcpu;
5285 kvm_for_each_vcpu(i, vcpu, kvm) {
5286 __disable_ibs_on_vcpu(vcpu);
5290 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
5294 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
5295 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
5298 int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
5300 int i, online_vcpus, r = 0, started_vcpus = 0;
5302 if (!is_vcpu_stopped(vcpu))
5305 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
5306 /* Only one cpu at a time may enter/leave the STOPPED state. */
5307 spin_lock(&vcpu->kvm->arch.start_stop_lock);
5308 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5310 /* Let's tell the UV that we want to change into the operating state */
5311 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5312 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
5314 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5319 for (i = 0; i < online_vcpus; i++) {
5320 if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
5321 started_vcpus++;
5324 if (started_vcpus == 0) {
5325 /* we're the only active VCPU -> speed it up */
5326 __enable_ibs_on_vcpu(vcpu);
5327 } else if (started_vcpus == 1) {
5329 * As we are starting a second VCPU, we have to disable
5330 * the IBS facility on all VCPUs to remove potentially
5331 * outstanding ENABLE requests.
5333 __disable_ibs_on_all_vcpus(vcpu->kvm);
5336 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
5338 * The real PSW might have changed due to a RESTART interpreted by the
5339 * ultravisor. We block all interrupts and let the next sie exit refresh our view.
5342 if (kvm_s390_pv_cpu_is_protected(vcpu))
5343 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
5345 * Another VCPU might have used IBS while we were offline.
5346 * Let's play safe and flush the VCPU at startup.
5348 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
5349 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5353 int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
5355 int i, online_vcpus, r = 0, started_vcpus = 0;
5356 struct kvm_vcpu *started_vcpu = NULL;
5358 if (is_vcpu_stopped(vcpu))
5361 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
5362 /* Only one cpu at a time may enter/leave the STOPPED state. */
5363 spin_lock(&vcpu->kvm->arch.start_stop_lock);
5364 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
5366 /* Let's tell the UV that we want to change into the stopped state */
5367 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5368 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
5370 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5376 * Set the VCPU to STOPPED and THEN clear the interrupt flag,
5377 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
5378 * have been fully processed. This will ensure that the VCPU
5379 * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
5381 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
5382 kvm_s390_clear_stop_irq(vcpu);
5384 __disable_ibs_on_vcpu(vcpu);
5386 for (i = 0; i < online_vcpus; i++) {
5387 struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);
5389 if (!is_vcpu_stopped(tmp)) {
5390 started_vcpus++;
5391 started_vcpu = tmp;
5395 if (started_vcpus == 1) {
5397 * As we only have one VCPU left, we want to enable the
5398 * IBS facility for that VCPU to speed it up.
5400 __enable_ibs_on_vcpu(started_vcpu);
5403 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
5407 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
5408 struct kvm_enable_cap *cap)
5416 case KVM_CAP_S390_CSS_SUPPORT:
5417 if (!vcpu->kvm->arch.css_support) {
5418 vcpu->kvm->arch.css_support = 1;
5419 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
5420 trace_kvm_s390_enable_css(vcpu->kvm);
5431 static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
5432 struct kvm_s390_mem_op *mop)
5434 void __user *uaddr = (void __user *)mop->buf;
5438 if (mop->flags || !mop->size)
5440 if (mop->size + mop->sida_offset < mop->size)
5442 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
5444 if (!kvm_s390_pv_cpu_is_protected(vcpu))
5447 sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;
5450 case KVM_S390_MEMOP_SIDA_READ:
5451 if (copy_to_user(uaddr, sida_addr, mop->size))
5455 case KVM_S390_MEMOP_SIDA_WRITE:
5456 if (copy_from_user(sida_addr, uaddr, mop->size))
5463 static long kvm_s390_vcpu_mem_op(struct kvm_vcpu *vcpu,
5464 struct kvm_s390_mem_op *mop)
5466 void __user *uaddr = (void __user *)mop->buf;
5467 enum gacc_mode acc_mode;
5468 void *tmpbuf = NULL;
5471 r = mem_op_validate_common(mop, KVM_S390_MEMOP_F_INJECT_EXCEPTION |
5472 KVM_S390_MEMOP_F_CHECK_ONLY |
5473 KVM_S390_MEMOP_F_SKEY_PROTECTION);
5476 if (mop->ar >= NUM_ACRS)
5478 if (kvm_s390_pv_cpu_is_protected(vcpu))
5480 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
5481 tmpbuf = vmalloc(mop->size);
5486 acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE;
5487 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
5488 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size,
5489 acc_mode, mop->key);
5492 if (acc_mode == GACC_FETCH) {
5493 r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5494 mop->size, mop->key);
5497 if (copy_to_user(uaddr, tmpbuf, mop->size)) {
5502 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
5506 r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf,
5507 mop->size, mop->key);
5511 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
5512 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
5519 static long kvm_s390_vcpu_memsida_op(struct kvm_vcpu *vcpu,
5520 struct kvm_s390_mem_op *mop)
5524 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5527 case KVM_S390_MEMOP_LOGICAL_READ:
5528 case KVM_S390_MEMOP_LOGICAL_WRITE:
5529 r = kvm_s390_vcpu_mem_op(vcpu, mop);
5531 case KVM_S390_MEMOP_SIDA_READ:
5532 case KVM_S390_MEMOP_SIDA_WRITE:
5533 /* we are locked against sida going away by the vcpu->mutex */
5534 r = kvm_s390_vcpu_sida_op(vcpu, mop);
5540 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
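/*
 * Illustrative userspace sketch (standard KVM_S390_MEM_OP usage, not
 * part of this file): reading guest memory through the vcpu's logical
 * address space, as dispatched above.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_guest_logical(int vcpu_fd, __u64 gaddr, void *buf, __u32 len)
{
	struct kvm_s390_mem_op op;

	memset(&op, 0, sizeof(op));
	op.gaddr = gaddr;
	op.size = len;		/* KVM caps this at MEM_OP_MAX_SIZE */
	op.buf = (__u64)(unsigned long)buf;
	op.op = KVM_S390_MEMOP_LOGICAL_READ;
	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}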
5544 long kvm_arch_vcpu_async_ioctl(struct file *filp,
5545 unsigned int ioctl, unsigned long arg)
5547 struct kvm_vcpu *vcpu = filp->private_data;
5548 void __user *argp = (void __user *)arg;
5552 case KVM_S390_IRQ: {
5553 struct kvm_s390_irq s390irq;
5555 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
5557 rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
5560 case KVM_S390_INTERRUPT: {
5561 struct kvm_s390_interrupt s390int;
5562 struct kvm_s390_irq s390irq = {};
5564 if (copy_from_user(&s390int, argp, sizeof(s390int)))
5566 if (s390int_to_s390irq(&s390int, &s390irq))
5568 rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
5577 * To simplify single stepping of userspace-emulated instructions,
5578 * KVM_EXIT_S390_SIEIC exit sets KVM_GUESTDBG_EXIT_PENDING (see
5579 * should_handle_per_ifetch()). However, if userspace emulation injects
5580 * an interrupt, it needs to be cleared, so that KVM_EXIT_DEBUG happens
5581 * after (and not before) the interrupt delivery.
5584 vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
5589 static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
5590 struct kvm_pv_cmd *cmd)
5592 struct kvm_s390_pv_dmp dmp;
5596 /* Dump initialization is a prerequisite */
5597 if (!vcpu->kvm->arch.pv.dumping)
5600 if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
5603 /* We only handle this subcmd right now */
5604 if (dmp.subcmd != KVM_PV_DUMP_CPU)
5607 /* CPU dump length is the same as create cpu storage donation. */
5608 if (dmp.buff_len != uv_info.guest_cpu_stor_len)
5611 data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
5615 ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);
5617 VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
5618 vcpu->vcpu_id, cmd->rc, cmd->rrc);
5623 /* On success copy over the dump data */
5624 if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
5631 long kvm_arch_vcpu_ioctl(struct file *filp,
5632 unsigned int ioctl, unsigned long arg)
5634 struct kvm_vcpu *vcpu = filp->private_data;
5635 void __user *argp = (void __user *)arg;
5643 case KVM_S390_STORE_STATUS:
5644 idx = srcu_read_lock(&vcpu->kvm->srcu);
5645 r = kvm_s390_store_status_unloaded(vcpu, arg);
5646 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5648 case KVM_S390_SET_INITIAL_PSW: {
5652 if (copy_from_user(&psw, argp, sizeof(psw)))
5654 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
5657 case KVM_S390_CLEAR_RESET:
5659 kvm_arch_vcpu_ioctl_clear_reset(vcpu);
5660 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5661 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5662 UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
5663 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
5667 case KVM_S390_INITIAL_RESET:
5669 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
5670 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5671 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5672 UVC_CMD_CPU_RESET_INITIAL,
5674 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
5678 case KVM_S390_NORMAL_RESET:
5680 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
5681 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
5682 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
5683 UVC_CMD_CPU_RESET, &rc, &rrc);
5684 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
5688 case KVM_SET_ONE_REG:
5689 case KVM_GET_ONE_REG: {
5690 struct kvm_one_reg reg;
5692 if (kvm_s390_pv_cpu_is_protected(vcpu))
5695 if (copy_from_user(&reg, argp, sizeof(reg)))
5697 if (ioctl == KVM_SET_ONE_REG)
5698 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
5700 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
5703 #ifdef CONFIG_KVM_S390_UCONTROL
5704 case KVM_S390_UCAS_MAP: {
5705 struct kvm_s390_ucas_mapping ucasmap;
5707 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
5712 if (!kvm_is_ucontrol(vcpu->kvm)) {
5717 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
5718 ucasmap.vcpu_addr, ucasmap.length);
5721 case KVM_S390_UCAS_UNMAP: {
5722 struct kvm_s390_ucas_mapping ucasmap;
5724 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
5729 if (!kvm_is_ucontrol(vcpu->kvm)) {
5734 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
5739 case KVM_S390_VCPU_FAULT: {
5740 r = gmap_fault(vcpu->arch.gmap, arg, 0);
5743 case KVM_ENABLE_CAP:
5745 struct kvm_enable_cap cap;
5747 if (copy_from_user(&cap, argp, sizeof(cap)))
5749 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
5752 case KVM_S390_MEM_OP: {
5753 struct kvm_s390_mem_op mem_op;
5755 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
5756 r = kvm_s390_vcpu_memsida_op(vcpu, &mem_op);
5761 case KVM_S390_SET_IRQ_STATE: {
5762 struct kvm_s390_irq_state irq_state;
5765 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5767 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
5768 irq_state.len == 0 ||
5769 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
5773 /* do not use irq_state.flags, it will break old QEMUs */
5774 r = kvm_s390_set_irq_state(vcpu,
5775 (void __user *) irq_state.buf,
5779 case KVM_S390_GET_IRQ_STATE: {
5780 struct kvm_s390_irq_state irq_state;
5783 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
5785 if (irq_state.len == 0) {
5789 /* do not use irq_state.flags, it will break old QEMUs */
5790 r = kvm_s390_get_irq_state(vcpu,
5791 (__u8 __user *) irq_state.buf,
5795 case KVM_S390_PV_CPU_COMMAND: {
5796 struct kvm_pv_cmd cmd;
5799 if (!is_prot_virt_host())
5803 if (copy_from_user(&cmd, argp, sizeof(cmd)))
5810 /* We only handle this cmd right now */
5811 if (cmd.cmd != KVM_PV_DUMP)
5814 r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);
5816 /* Always copy over UV rc / rrc data */
5817 if (copy_to_user((__u8 __user *)argp, &cmd.rc,
5818 sizeof(cmd.rc) + sizeof(cmd.rrc)))
5830 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
5832 #ifdef CONFIG_KVM_S390_UCONTROL
5833 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
5834 && (kvm_is_ucontrol(vcpu->kvm))) {
5835 vmf->page = virt_to_page(vcpu->arch.sie_block);
5836 get_page(vmf->page);
5840 return VM_FAULT_SIGBUS;
5843 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
5848 /* Section: memory related */
5849 int kvm_arch_prepare_memory_region(struct kvm *kvm,
5850 const struct kvm_memory_slot *old,
5851 struct kvm_memory_slot *new,
5852 enum kvm_mr_change change)
5856 if (kvm_is_ucontrol(kvm))
5859 /* When we are protected, we should not change the memory slots */
5860 if (kvm_s390_pv_get_handle(kvm))
5863 if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
5865 * A few sanity checks. Memory slots have to start and end at a
5866 * segment boundary (1 MB); the memory in userland may be fragmented
5867 * across multiple VMAs. It is okay to mmap() and munmap() within
5868 * this slot at any time after this call.
5871 if (new->userspace_addr & 0xffffful)
5874 size = new->npages * PAGE_SIZE;
5875 if (size & 0xffffful)
5878 if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
5882 if (!kvm->arch.migration_mode)
5886 * Turn off migration mode when:
5887 * - userspace creates a new memslot with dirty logging off,
5888 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and
5889 * dirty logging is turned off.
5890 * Migration mode expects dirty page logging to be enabled in order to store its data.
5893 if (change != KVM_MR_DELETE &&
5894 !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
5895 WARN(kvm_s390_vm_stop_migration(kvm),
5896 "Failed to stop migration mode");
5901 void kvm_arch_commit_memory_region(struct kvm *kvm,
5902 struct kvm_memory_slot *old,
5903 const struct kvm_memory_slot *new,
5904 enum kvm_mr_change change)
5910 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5911 old->npages * PAGE_SIZE);
5914 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5915 old->npages * PAGE_SIZE);
5920 rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
5921 new->base_gfn * PAGE_SIZE,
5922 new->npages * PAGE_SIZE);
5924 case KVM_MR_FLAGS_ONLY:
5927 WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
5930 pr_warn("failed to commit memory region\n");
5934 static inline unsigned long nonhyp_mask(int i)
5936 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
5938 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
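/*
 * Worked example (illustrative, standalone) of nonhyp_mask() above:
 * sclp.hmfai packs one 2-bit value per 64-bit facility word; value 0
 * keeps the low 48 facility bits of that word, 1 keeps 32, 2 keeps 16
 * and 3 keeps none, hiding non-hypervisor-managed facilities from
 * guests.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t nonhyp_mask_demo(uint32_t hmfai, int i)
{
	unsigned int fai = (hmfai << i * 2) >> 30;	/* 2-bit entry i */

	return 0x0000ffffffffffffULL >> (fai << 4);
}

int main(void)
{
	/* hypothetical hmfai whose entry 0 is 0b01: word 0 keeps 32 bits */
	printf("%016llx\n", (unsigned long long)nonhyp_mask_demo(0x40000000u, 0));
	return 0;
}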
5941 static int __init kvm_s390_init(void)
5945 if (!sclp.has_sief2) {
5946 pr_info("SIE is not available\n");
5950 if (nested && hpage) {
5951 pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
5955 for (i = 0; i < 16; i++)
5956 kvm_s390_fac_base[i] |=
5957 stfle_fac_list[i] & nonhyp_mask(i);
5959 r = __kvm_s390_init();
5963 r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
5971 static void __exit kvm_s390_exit(void)
5978 module_init(kvm_s390_init);
5979 module_exit(kvm_s390_exit);
5982 * Enable autoloading of the kvm module.
5983 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
5984 * since x86 takes a different approach.
5986 #include <linux/miscdevice.h>
5987 MODULE_ALIAS_MISCDEV(KVM_MINOR);
5988 MODULE_ALIAS("devname:kvm");