2 * hosting zSeries kernel virtual machines
4 * Copyright IBM Corp. 2008, 2009
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
17 #include <linux/compiler.h>
18 #include <linux/err.h>
20 #include <linux/hrtimer.h>
21 #include <linux/init.h>
22 #include <linux/kvm.h>
23 #include <linux/kvm_host.h>
24 #include <linux/mman.h>
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/random.h>
28 #include <linux/slab.h>
29 #include <linux/timer.h>
30 #include <linux/vmalloc.h>
31 #include <linux/bitmap.h>
32 #include <linux/sched/signal.h>
33 #include <linux/string.h>
35 #include <asm/asm-offsets.h>
36 #include <asm/lowcore.h>
38 #include <asm/pgtable.h>
41 #include <asm/switch_to.h>
44 #include <asm/cpacf.h>
45 #include <asm/timex.h>
49 #define KMSG_COMPONENT "kvm-s390"
51 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
53 #define CREATE_TRACE_POINTS
55 #include "trace-s390.h"
57 #define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
59 #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
60 (KVM_MAX_VCPUS + LOCAL_IRQS))
62 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
64 struct kvm_stats_debugfs_item debugfs_entries[] = {
65 { "userspace_handled", VCPU_STAT(exit_userspace) },
66 { "exit_null", VCPU_STAT(exit_null) },
67 { "exit_validity", VCPU_STAT(exit_validity) },
68 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
69 { "exit_external_request", VCPU_STAT(exit_external_request) },
70 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
71 { "exit_instruction", VCPU_STAT(exit_instruction) },
72 { "exit_pei", VCPU_STAT(exit_pei) },
73 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
74 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
75 { "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
76 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
77 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
78 { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
79 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
80 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
81 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
82 { "instruction_stctl", VCPU_STAT(instruction_stctl) },
83 { "instruction_stctg", VCPU_STAT(instruction_stctg) },
84 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
85 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
86 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
87 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
88 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
89 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
90 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
91 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
92 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
93 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
94 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
95 { "instruction_spx", VCPU_STAT(instruction_spx) },
96 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
97 { "instruction_stap", VCPU_STAT(instruction_stap) },
98 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
99 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
100 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
101 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
102 { "instruction_essa", VCPU_STAT(instruction_essa) },
103 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
104 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
105 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
106 { "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
107 { "instruction_sie", VCPU_STAT(instruction_sie) },
108 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
109 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
110 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
111 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
112 { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
113 { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
114 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
115 { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
116 { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
117 { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
118 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
119 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
120 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
121 { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
122 { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
123 { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
124 { "diagnose_10", VCPU_STAT(diagnose_10) },
125 { "diagnose_44", VCPU_STAT(diagnose_44) },
126 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
127 { "diagnose_258", VCPU_STAT(diagnose_258) },
128 { "diagnose_308", VCPU_STAT(diagnose_308) },
129 { "diagnose_500", VCPU_STAT(diagnose_500) },
133 /* allow nested virtualization in KVM (if enabled by user space) */
135 module_param(nested, int, S_IRUGO);
136 MODULE_PARM_DESC(nested, "Nested virtualization support");
138 /* upper facilities limit for kvm */
139 unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };
141 unsigned long kvm_s390_fac_list_mask_size(void)
143 BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
144 return ARRAY_SIZE(kvm_s390_fac_list_mask);
147 /* available cpu features supported by kvm */
148 static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
149 /* available subfunctions indicated via query / "test bit" */
150 static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;
152 static struct gmap_notifier gmap_notifier;
153 static struct gmap_notifier vsie_gmap_notifier;
154 debug_info_t *kvm_s390_dbf;
156 /* Section: not file related */
157 int kvm_arch_hardware_enable(void)
159 /* every s390 is virtualization enabled ;-) */
163 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
167 * This callback is executed during stop_machine(). All CPUs are therefore
168 * temporarily stopped. In order not to change guest behavior, we have to
169 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
170 * so a CPU won't be stopped while calculating with the epoch.
172 static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
176 struct kvm_vcpu *vcpu;
178 unsigned long long *delta = v;
180 list_for_each_entry(kvm, &vm_list, vm_list) {
181 kvm->arch.epoch -= *delta;
182 kvm_for_each_vcpu(i, vcpu, kvm) {
183 vcpu->arch.sie_block->epoch -= *delta;
184 if (vcpu->arch.cputm_enabled)
185 vcpu->arch.cputm_start += *delta;
186 if (vcpu->arch.vsie_block)
187 vcpu->arch.vsie_block->epoch -= *delta;
193 static struct notifier_block kvm_clock_notifier = {
194 .notifier_call = kvm_clock_sync,
197 int kvm_arch_hardware_setup(void)
199 gmap_notifier.notifier_call = kvm_gmap_notifier;
200 gmap_register_pte_notifier(&gmap_notifier);
201 vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
202 gmap_register_pte_notifier(&vsie_gmap_notifier);
203 atomic_notifier_chain_register(&s390_epoch_delta_notifier,
204 &kvm_clock_notifier);
208 void kvm_arch_hardware_unsetup(void)
210 gmap_unregister_pte_notifier(&gmap_notifier);
211 gmap_unregister_pte_notifier(&vsie_gmap_notifier);
212 atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
213 &kvm_clock_notifier);
216 static void allow_cpu_feat(unsigned long nr)
218 set_bit_inv(nr, kvm_s390_available_cpu_feat);
221 static inline int plo_test_bit(unsigned char nr)
223 register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
227 /* Parameter registers are ignored for "test bit" */
237 static void kvm_s390_cpu_feat_init(void)
241 for (i = 0; i < 256; ++i) {
243 kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
246 if (test_facility(28)) /* TOD-clock steering */
247 ptff(kvm_s390_available_subfunc.ptff,
248 sizeof(kvm_s390_available_subfunc.ptff),
251 if (test_facility(17)) { /* MSA */
252 __cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
253 kvm_s390_available_subfunc.kmac);
254 __cpacf_query(CPACF_KMC, (cpacf_mask_t *)
255 kvm_s390_available_subfunc.kmc);
256 __cpacf_query(CPACF_KM, (cpacf_mask_t *)
257 kvm_s390_available_subfunc.km);
258 __cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
259 kvm_s390_available_subfunc.kimd);
260 __cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
261 kvm_s390_available_subfunc.klmd);
263 if (test_facility(76)) /* MSA3 */
264 __cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
265 kvm_s390_available_subfunc.pckmo);
266 if (test_facility(77)) { /* MSA4 */
267 __cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
268 kvm_s390_available_subfunc.kmctr);
269 __cpacf_query(CPACF_KMF, (cpacf_mask_t *)
270 kvm_s390_available_subfunc.kmf);
271 __cpacf_query(CPACF_KMO, (cpacf_mask_t *)
272 kvm_s390_available_subfunc.kmo);
273 __cpacf_query(CPACF_PCC, (cpacf_mask_t *)
274 kvm_s390_available_subfunc.pcc);
276 if (test_facility(57)) /* MSA5 */
277 __cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
278 kvm_s390_available_subfunc.ppno);
280 if (test_facility(146)) /* MSA8 */
281 __cpacf_query(CPACF_KMA, (cpacf_mask_t *)
282 kvm_s390_available_subfunc.kma);
284 if (MACHINE_HAS_ESOP)
285 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
287 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
288 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
290 if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
291 !test_facility(3) || !nested)
293 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
294 if (sclp.has_64bscao)
295 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
297 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
299 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
301 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
303 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
305 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
307 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
309 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
311 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
312 * all skey handling functions read/set the skey from the PGSTE
313 * instead of the real storage key.
315 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
316 * pages being detected as preserved although they are resident.
318 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
319 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
321 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
322 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
323 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
325 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
326 * cannot easily shadow the SCA because of the ipte lock.
330 int kvm_arch_init(void *opaque)
332 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
336 if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
337 debug_unregister(kvm_s390_dbf);
341 kvm_s390_cpu_feat_init();
343 /* Register floating interrupt controller interface. */
344 return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
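/*
 * Illustrative sketch (not part of this file): once kvm_flic_ops is
 * registered above, a VMM can instantiate the floating interrupt
 * controller through the generic device API on the VM file descriptor.
 * "vm_fd" and "flic_fd" are the VMM's own descriptors, roughly:
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_FLIC,
 *	};
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0)
 *		flic_fd = cd.fd;	// fd for FLIC device attributes
 */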
347 void kvm_arch_exit(void)
349 debug_unregister(kvm_s390_dbf);
352 /* Section: device related */
353 long kvm_arch_dev_ioctl(struct file *filp,
354 unsigned int ioctl, unsigned long arg)
356 if (ioctl == KVM_S390_ENABLE_SIE)
357 return s390_enable_sie();
361 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
366 case KVM_CAP_S390_PSW:
367 case KVM_CAP_S390_GMAP:
368 case KVM_CAP_SYNC_MMU:
369 #ifdef CONFIG_KVM_S390_UCONTROL
370 case KVM_CAP_S390_UCONTROL:
372 case KVM_CAP_ASYNC_PF:
373 case KVM_CAP_SYNC_REGS:
374 case KVM_CAP_ONE_REG:
375 case KVM_CAP_ENABLE_CAP:
376 case KVM_CAP_S390_CSS_SUPPORT:
377 case KVM_CAP_IOEVENTFD:
378 case KVM_CAP_DEVICE_CTRL:
379 case KVM_CAP_ENABLE_CAP_VM:
380 case KVM_CAP_S390_IRQCHIP:
381 case KVM_CAP_VM_ATTRIBUTES:
382 case KVM_CAP_MP_STATE:
383 case KVM_CAP_IMMEDIATE_EXIT:
384 case KVM_CAP_S390_INJECT_IRQ:
385 case KVM_CAP_S390_USER_SIGP:
386 case KVM_CAP_S390_USER_STSI:
387 case KVM_CAP_S390_SKEYS:
388 case KVM_CAP_S390_IRQ_STATE:
389 case KVM_CAP_S390_USER_INSTR0:
390 case KVM_CAP_S390_CMMA_MIGRATION:
391 case KVM_CAP_S390_AIS:
394 case KVM_CAP_S390_MEM_OP:
397 case KVM_CAP_NR_VCPUS:
398 case KVM_CAP_MAX_VCPUS:
399 r = KVM_S390_BSCA_CPU_SLOTS;
400 if (!kvm_s390_use_sca_entries())
402 else if (sclp.has_esca && sclp.has_64bscao)
403 r = KVM_S390_ESCA_CPU_SLOTS;
405 case KVM_CAP_NR_MEMSLOTS:
406 r = KVM_USER_MEM_SLOTS;
408 case KVM_CAP_S390_COW:
409 r = MACHINE_HAS_ESOP;
411 case KVM_CAP_S390_VECTOR_REGISTERS:
414 case KVM_CAP_S390_RI:
415 r = test_facility(64);
417 case KVM_CAP_S390_GS:
418 r = test_facility(133);
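/*
 * Usage sketch (assumption, not from this file): userspace probes the
 * capabilities listed above with KVM_CHECK_EXTENSION on the VM fd and
 * treats a positive return value as "supported"; for some caps the value
 * carries extra information, e.g. a transfer size or a VCPU slot count.
 *
 *	int slots = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
 *	if (slots > 0)
 *		max_vcpus = slots;	// BSCA or ESCA slot count from above
 */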
426 static void kvm_s390_sync_dirty_log(struct kvm *kvm,
427 struct kvm_memory_slot *memslot)
429 gfn_t cur_gfn, last_gfn;
430 unsigned long address;
431 struct gmap *gmap = kvm->arch.gmap;
433 /* Loop over all guest pages */
434 last_gfn = memslot->base_gfn + memslot->npages;
435 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
436 address = gfn_to_hva_memslot(memslot, cur_gfn);
438 if (test_and_clear_guest_dirty(gmap->mm, address))
439 mark_page_dirty(kvm, cur_gfn);
440 if (fatal_signal_pending(current))
446 /* Section: vm related */
447 static void sca_del_vcpu(struct kvm_vcpu *vcpu);
450 * Get (and clear) the dirty memory log for a memory slot.
452 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
453 struct kvm_dirty_log *log)
457 struct kvm_memslots *slots;
458 struct kvm_memory_slot *memslot;
461 if (kvm_is_ucontrol(kvm))
464 mutex_lock(&kvm->slots_lock);
467 if (log->slot >= KVM_USER_MEM_SLOTS)
470 slots = kvm_memslots(kvm);
471 memslot = id_to_memslot(slots, log->slot);
473 if (!memslot->dirty_bitmap)
476 kvm_s390_sync_dirty_log(kvm, memslot);
477 r = kvm_get_dirty_log(kvm, log, &is_dirty);
481 /* Clear the dirty log */
483 n = kvm_dirty_bitmap_bytes(memslot);
484 memset(memslot->dirty_bitmap, 0, n);
488 mutex_unlock(&kvm->slots_lock);
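/*
 * Illustrative userspace sketch (assumption): the dirty log of a memslot
 * is fetched with KVM_GET_DIRTY_LOG; the caller supplies a bitmap with
 * one bit per page of the slot.
 *
 *	struct kvm_dirty_log log = {
 *		.slot = slot_id,		// hypothetical slot number
 *		.dirty_bitmap = bitmap,		// npages / 8 bytes, caller owned
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */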
492 static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
495 struct kvm_vcpu *vcpu;
497 kvm_for_each_vcpu(i, vcpu, kvm) {
498 kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
502 static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
510 case KVM_CAP_S390_IRQCHIP:
511 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
512 kvm->arch.use_irqchip = 1;
515 case KVM_CAP_S390_USER_SIGP:
516 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
517 kvm->arch.user_sigp = 1;
520 case KVM_CAP_S390_VECTOR_REGISTERS:
521 mutex_lock(&kvm->lock);
522 if (kvm->created_vcpus) {
524 } else if (MACHINE_HAS_VX) {
525 set_kvm_facility(kvm->arch.model.fac_mask, 129);
526 set_kvm_facility(kvm->arch.model.fac_list, 129);
527 if (test_facility(134)) {
528 set_kvm_facility(kvm->arch.model.fac_mask, 134);
529 set_kvm_facility(kvm->arch.model.fac_list, 134);
531 if (test_facility(135)) {
532 set_kvm_facility(kvm->arch.model.fac_mask, 135);
533 set_kvm_facility(kvm->arch.model.fac_list, 135);
538 mutex_unlock(&kvm->lock);
539 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
540 r ? "(not available)" : "(success)");
542 case KVM_CAP_S390_RI:
544 mutex_lock(&kvm->lock);
545 if (kvm->created_vcpus) {
547 } else if (test_facility(64)) {
548 set_kvm_facility(kvm->arch.model.fac_mask, 64);
549 set_kvm_facility(kvm->arch.model.fac_list, 64);
552 mutex_unlock(&kvm->lock);
553 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
554 r ? "(not available)" : "(success)");
556 case KVM_CAP_S390_AIS:
557 mutex_lock(&kvm->lock);
558 if (kvm->created_vcpus) {
561 set_kvm_facility(kvm->arch.model.fac_mask, 72);
562 set_kvm_facility(kvm->arch.model.fac_list, 72);
565 mutex_unlock(&kvm->lock);
566 VM_EVENT(kvm, 3, "ENABLE: AIS %s",
567 r ? "(not available)" : "(success)");
569 case KVM_CAP_S390_GS:
571 mutex_lock(&kvm->lock);
572 if (atomic_read(&kvm->online_vcpus)) {
574 } else if (test_facility(133)) {
575 set_kvm_facility(kvm->arch.model.fac_mask, 133);
576 set_kvm_facility(kvm->arch.model.fac_list, 133);
579 mutex_unlock(&kvm->lock);
580 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
581 r ? "(not available)" : "(success)");
583 case KVM_CAP_S390_USER_STSI:
584 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
585 kvm->arch.user_stsi = 1;
588 case KVM_CAP_S390_USER_INSTR0:
589 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
590 kvm->arch.user_instr0 = 1;
591 icpt_operexc_on_all_vcpus(kvm);
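/*
 * Usage sketch (assumption): the VM-wide capabilities handled above are
 * turned on with KVM_ENABLE_CAP on the VM fd, e.g. to let userspace
 * handle SIGP orders itself:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_S390_USER_SIGP,
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 *
 * Capabilities that set facility bits (VECTOR_REGISTERS, RI, GS) must be
 * enabled before the first VCPU is created, as the checks above enforce.
 */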
601 static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
605 switch (attr->attr) {
606 case KVM_S390_VM_MEM_LIMIT_SIZE:
608 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
609 kvm->arch.mem_limit);
610 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
620 static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
624 switch (attr->attr) {
625 case KVM_S390_VM_MEM_ENABLE_CMMA:
631 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
632 mutex_lock(&kvm->lock);
633 if (!kvm->created_vcpus) {
634 kvm->arch.use_cmma = 1;
637 mutex_unlock(&kvm->lock);
639 case KVM_S390_VM_MEM_CLR_CMMA:
644 if (!kvm->arch.use_cmma)
647 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
648 mutex_lock(&kvm->lock);
649 idx = srcu_read_lock(&kvm->srcu);
650 s390_reset_cmma(kvm->arch.gmap->mm);
651 srcu_read_unlock(&kvm->srcu, idx);
652 mutex_unlock(&kvm->lock);
655 case KVM_S390_VM_MEM_LIMIT_SIZE: {
656 unsigned long new_limit;
658 if (kvm_is_ucontrol(kvm))
661 if (get_user(new_limit, (u64 __user *)attr->addr))
664 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
665 new_limit > kvm->arch.mem_limit)
671 /* gmap_create takes last usable address */
672 if (new_limit != KVM_S390_NO_MEM_LIMIT)
676 mutex_lock(&kvm->lock);
677 if (!kvm->created_vcpus) {
678 /* gmap_create will round the limit up */
679 struct gmap *new = gmap_create(current->mm, new_limit);
684 gmap_remove(kvm->arch.gmap);
686 kvm->arch.gmap = new;
690 mutex_unlock(&kvm->lock);
691 VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
692 VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
693 (void *) kvm->arch.gmap->asce);
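/*
 * Illustrative sketch (assumption): the memory limit handled above is
 * configured through the VM attribute interface before any VCPU exists:
 *
 *	__u64 limit = 16ULL << 30;		// example: 16 GiB
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
 *		.addr  = (__u64)&limit,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */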
703 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
705 static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
707 struct kvm_vcpu *vcpu;
710 if (!test_kvm_facility(kvm, 76))
713 mutex_lock(&kvm->lock);
714 switch (attr->attr) {
715 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
717 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
718 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
719 kvm->arch.crypto.aes_kw = 1;
720 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
722 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
724 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
725 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
726 kvm->arch.crypto.dea_kw = 1;
727 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
729 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
730 kvm->arch.crypto.aes_kw = 0;
731 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
732 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
733 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
735 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
736 kvm->arch.crypto.dea_kw = 0;
737 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
738 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
739 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
742 mutex_unlock(&kvm->lock);
746 kvm_for_each_vcpu(i, vcpu, kvm) {
747 kvm_s390_vcpu_crypto_setup(vcpu);
750 mutex_unlock(&kvm->lock);
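/*
 * Usage sketch (assumption): key wrapping is toggled via the crypto
 * attribute group; no payload is needed, the attribute itself encodes
 * the action:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CRYPTO,
 *		.attr  = KVM_S390_VM_CRYPTO_DISABLE_AES_KW,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * All VCPUs then pick up the new wrapping-key setup through the
 * kvm_s390_vcpu_crypto_setup() calls above.
 */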
754 static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
757 struct kvm_vcpu *vcpu;
759 kvm_for_each_vcpu(cx, vcpu, kvm)
760 kvm_s390_sync_request(req, vcpu);
764 * Must be called with kvm->srcu held to avoid races on memslots, and with
765 * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
767 static int kvm_s390_vm_start_migration(struct kvm *kvm)
769 struct kvm_s390_migration_state *mgs;
770 struct kvm_memory_slot *ms;
771 /* should be the only one */
772 struct kvm_memslots *slots;
773 unsigned long ram_pages;
776 /* migration mode already enabled */
777 if (kvm->arch.migration_state)
780 slots = kvm_memslots(kvm);
781 if (!slots || !slots->used_slots)
784 mgs = kzalloc(sizeof(*mgs), GFP_KERNEL);
787 kvm->arch.migration_state = mgs;
789 if (kvm->arch.use_cmma) {
791 * Get the last slot. They should be sorted by base_gfn, so the
792 * last slot is also the one at the end of the address space.
793 * We have verified above that at least one slot is present.
795 ms = slots->memslots + slots->used_slots - 1;
796 /* round up so we only use full longs */
797 ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
798 /* allocate enough bytes to store all the bits */
799 mgs->pgste_bitmap = vmalloc(ram_pages / 8);
800 if (!mgs->pgste_bitmap) {
802 kvm->arch.migration_state = NULL;
806 mgs->bitmap_size = ram_pages;
807 atomic64_set(&mgs->dirty_pages, ram_pages);
808 /* mark all the pages in active slots as dirty */
809 for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
810 ms = slots->memslots + slotnr;
811 bitmap_set(mgs->pgste_bitmap, ms->base_gfn, ms->npages);
814 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
820 * Must be called with kvm->lock to avoid races with ourselves and
821 * kvm_s390_vm_start_migration.
823 static int kvm_s390_vm_stop_migration(struct kvm *kvm)
825 struct kvm_s390_migration_state *mgs;
827 /* migration mode already disabled */
828 if (!kvm->arch.migration_state)
830 mgs = kvm->arch.migration_state;
831 kvm->arch.migration_state = NULL;
833 if (kvm->arch.use_cmma) {
834 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
835 vfree(mgs->pgste_bitmap);
841 static int kvm_s390_vm_set_migration(struct kvm *kvm,
842 struct kvm_device_attr *attr)
844 int idx, res = -ENXIO;
846 mutex_lock(&kvm->lock);
847 switch (attr->attr) {
848 case KVM_S390_VM_MIGRATION_START:
849 idx = srcu_read_lock(&kvm->srcu);
850 res = kvm_s390_vm_start_migration(kvm);
851 srcu_read_unlock(&kvm->srcu, idx);
853 case KVM_S390_VM_MIGRATION_STOP:
854 res = kvm_s390_vm_stop_migration(kvm);
859 mutex_unlock(&kvm->lock);
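/*
 * Illustrative sketch (assumption): migration mode is switched on and off
 * through the same attribute interface; starting it allocates the PGSTE
 * dirty bitmap handled above:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MIGRATION,
 *		.attr  = KVM_S390_VM_MIGRATION_START,	// or _STOP
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */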
864 static int kvm_s390_vm_get_migration(struct kvm *kvm,
865 struct kvm_device_attr *attr)
867 u64 mig = (kvm->arch.migration_state != NULL);
869 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
872 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
877 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
881 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
887 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
892 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
896 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
899 kvm_s390_set_tod_clock(kvm, gtod);
900 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
904 static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
911 switch (attr->attr) {
912 case KVM_S390_VM_TOD_HIGH:
913 ret = kvm_s390_set_tod_high(kvm, attr);
915 case KVM_S390_VM_TOD_LOW:
916 ret = kvm_s390_set_tod_low(kvm, attr);
925 static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
929 if (copy_to_user((void __user *)attr->addr, &gtod_high,
932 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
937 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
941 gtod = kvm_s390_get_tod_clock_fast(kvm);
942 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
944 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
949 static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
956 switch (attr->attr) {
957 case KVM_S390_VM_TOD_HIGH:
958 ret = kvm_s390_get_tod_high(kvm, attr);
960 case KVM_S390_VM_TOD_LOW:
961 ret = kvm_s390_get_tod_low(kvm, attr);
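/*
 * Usage sketch (assumption): the guest TOD clock is read and written as
 * two attributes, a u8 epoch extension (TOD_HIGH) and a u64 base (TOD_LOW):
 *
 *	__u64 gtod;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_LOW,
 *		.addr  = (__u64)&gtod,
 *	};
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);	// read current TOD base
 */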
970 static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
972 struct kvm_s390_vm_cpu_processor *proc;
973 u16 lowest_ibc, unblocked_ibc;
976 mutex_lock(&kvm->lock);
977 if (kvm->created_vcpus) {
981 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
986 if (!copy_from_user(proc, (void __user *)attr->addr,
988 kvm->arch.model.cpuid = proc->cpuid;
989 lowest_ibc = sclp.ibc >> 16 & 0xfff;
990 unblocked_ibc = sclp.ibc & 0xfff;
991 if (lowest_ibc && proc->ibc) {
992 if (proc->ibc > unblocked_ibc)
993 kvm->arch.model.ibc = unblocked_ibc;
994 else if (proc->ibc < lowest_ibc)
995 kvm->arch.model.ibc = lowest_ibc;
997 kvm->arch.model.ibc = proc->ibc;
999 memcpy(kvm->arch.model.fac_list, proc->fac_list,
1000 S390_ARCH_FAC_LIST_SIZE_BYTE);
1001 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1002 kvm->arch.model.ibc,
1003 kvm->arch.model.cpuid);
1004 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1005 kvm->arch.model.fac_list[0],
1006 kvm->arch.model.fac_list[1],
1007 kvm->arch.model.fac_list[2]);
1012 mutex_unlock(&kvm->lock);
1016 static int kvm_s390_set_processor_feat(struct kvm *kvm,
1017 struct kvm_device_attr *attr)
1019 struct kvm_s390_vm_cpu_feat data;
1022 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1024 if (!bitmap_subset((unsigned long *) data.feat,
1025 kvm_s390_available_cpu_feat,
1026 KVM_S390_VM_CPU_FEAT_NR_BITS))
1029 mutex_lock(&kvm->lock);
1030 if (!atomic_read(&kvm->online_vcpus)) {
1031 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1032 KVM_S390_VM_CPU_FEAT_NR_BITS);
1035 mutex_unlock(&kvm->lock);
1039 static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1040 struct kvm_device_attr *attr)
1043 * Once supported by kernel + hw, we have to store the subfunctions
1044 * in kvm->arch and remember that user space configured them.
1049 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1053 switch (attr->attr) {
1054 case KVM_S390_VM_CPU_PROCESSOR:
1055 ret = kvm_s390_set_processor(kvm, attr);
1057 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1058 ret = kvm_s390_set_processor_feat(kvm, attr);
1060 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1061 ret = kvm_s390_set_processor_subfunc(kvm, attr);
1067 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1069 struct kvm_s390_vm_cpu_processor *proc;
1072 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1077 proc->cpuid = kvm->arch.model.cpuid;
1078 proc->ibc = kvm->arch.model.ibc;
1079 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1080 S390_ARCH_FAC_LIST_SIZE_BYTE);
1081 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1082 kvm->arch.model.ibc,
1083 kvm->arch.model.cpuid);
1084 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1085 kvm->arch.model.fac_list[0],
1086 kvm->arch.model.fac_list[1],
1087 kvm->arch.model.fac_list[2]);
1088 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1095 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1097 struct kvm_s390_vm_cpu_machine *mach;
1100 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
1105 get_cpu_id((struct cpuid *) &mach->cpuid);
1106 mach->ibc = sclp.ibc;
1107 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
1108 S390_ARCH_FAC_LIST_SIZE_BYTE);
1109 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
1110 sizeof(S390_lowcore.stfle_fac_list));
1111 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
1112 kvm->arch.model.ibc,
1113 kvm->arch.model.cpuid);
1114 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
1118 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1122 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1129 static int kvm_s390_get_processor_feat(struct kvm *kvm,
1130 struct kvm_device_attr *attr)
1132 struct kvm_s390_vm_cpu_feat data;
1134 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1135 KVM_S390_VM_CPU_FEAT_NR_BITS);
1136 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1141 static int kvm_s390_get_machine_feat(struct kvm *kvm,
1142 struct kvm_device_attr *attr)
1144 struct kvm_s390_vm_cpu_feat data;
1146 bitmap_copy((unsigned long *) data.feat,
1147 kvm_s390_available_cpu_feat,
1148 KVM_S390_VM_CPU_FEAT_NR_BITS);
1149 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1154 static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1155 struct kvm_device_attr *attr)
1158 * Once we can actually configure subfunctions (kernel + hw support),
1159 * we have to check if they were already set by user space, if so copy
1160 * them from kvm->arch.
1165 static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1166 struct kvm_device_attr *attr)
1168 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1169 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1173 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1177 switch (attr->attr) {
1178 case KVM_S390_VM_CPU_PROCESSOR:
1179 ret = kvm_s390_get_processor(kvm, attr);
1181 case KVM_S390_VM_CPU_MACHINE:
1182 ret = kvm_s390_get_machine(kvm, attr);
1184 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1185 ret = kvm_s390_get_processor_feat(kvm, attr);
1187 case KVM_S390_VM_CPU_MACHINE_FEAT:
1188 ret = kvm_s390_get_machine_feat(kvm, attr);
1190 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1191 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1193 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1194 ret = kvm_s390_get_machine_subfunc(kvm, attr);
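/*
 * Illustrative flow (assumption): a VMM typically reads the host machine
 * model first and then writes a (possibly reduced) processor model back
 * before creating VCPUs:
 *
 *	struct kvm_s390_vm_cpu_machine mach;
 *	struct kvm_s390_vm_cpu_processor proc;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CPU_MODEL,
 *		.attr  = KVM_S390_VM_CPU_MACHINE,
 *		.addr  = (__u64)&mach,
 *	};
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 *	// ... derive proc.cpuid, proc.ibc, proc.fac_list from mach ...
 *	attr.attr = KVM_S390_VM_CPU_PROCESSOR;
 *	attr.addr = (__u64)&proc;
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */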
1200 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1204 switch (attr->group) {
1205 case KVM_S390_VM_MEM_CTRL:
1206 ret = kvm_s390_set_mem_control(kvm, attr);
1208 case KVM_S390_VM_TOD:
1209 ret = kvm_s390_set_tod(kvm, attr);
1211 case KVM_S390_VM_CPU_MODEL:
1212 ret = kvm_s390_set_cpu_model(kvm, attr);
1214 case KVM_S390_VM_CRYPTO:
1215 ret = kvm_s390_vm_set_crypto(kvm, attr);
1217 case KVM_S390_VM_MIGRATION:
1218 ret = kvm_s390_vm_set_migration(kvm, attr);
1228 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1232 switch (attr->group) {
1233 case KVM_S390_VM_MEM_CTRL:
1234 ret = kvm_s390_get_mem_control(kvm, attr);
1236 case KVM_S390_VM_TOD:
1237 ret = kvm_s390_get_tod(kvm, attr);
1239 case KVM_S390_VM_CPU_MODEL:
1240 ret = kvm_s390_get_cpu_model(kvm, attr);
1242 case KVM_S390_VM_MIGRATION:
1243 ret = kvm_s390_vm_get_migration(kvm, attr);
1253 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1257 switch (attr->group) {
1258 case KVM_S390_VM_MEM_CTRL:
1259 switch (attr->attr) {
1260 case KVM_S390_VM_MEM_ENABLE_CMMA:
1261 case KVM_S390_VM_MEM_CLR_CMMA:
1262 ret = sclp.has_cmma ? 0 : -ENXIO;
1264 case KVM_S390_VM_MEM_LIMIT_SIZE:
1272 case KVM_S390_VM_TOD:
1273 switch (attr->attr) {
1274 case KVM_S390_VM_TOD_LOW:
1275 case KVM_S390_VM_TOD_HIGH:
1283 case KVM_S390_VM_CPU_MODEL:
1284 switch (attr->attr) {
1285 case KVM_S390_VM_CPU_PROCESSOR:
1286 case KVM_S390_VM_CPU_MACHINE:
1287 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1288 case KVM_S390_VM_CPU_MACHINE_FEAT:
1289 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1292 /* configuring subfunctions is not supported yet */
1293 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1299 case KVM_S390_VM_CRYPTO:
1300 switch (attr->attr) {
1301 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1302 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1303 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1304 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1312 case KVM_S390_VM_MIGRATION:
1323 static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1327 int srcu_idx, i, r = 0;
1329 if (args->flags != 0)
1332 /* Is this guest using storage keys? */
1333 if (!mm_use_skey(current->mm))
1334 return KVM_S390_GET_SKEYS_NONE;
1336 /* Enforce sane limit on memory allocation */
1337 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1340 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
1344 down_read(&current->mm->mmap_sem);
1345 srcu_idx = srcu_read_lock(&kvm->srcu);
1346 for (i = 0; i < args->count; i++) {
1347 hva = gfn_to_hva(kvm, args->start_gfn + i);
1348 if (kvm_is_error_hva(hva)) {
1353 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1357 srcu_read_unlock(&kvm->srcu, srcu_idx);
1358 up_read(&current->mm->mmap_sem);
1361 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1362 sizeof(uint8_t) * args->count);
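/*
 * Usage sketch (assumption): storage keys are bulk-read with the
 * KVM_S390_GET_SKEYS VM ioctl; a return of KVM_S390_GET_SKEYS_NONE means
 * the guest never enabled key usage (the mm_use_skey() check above).
 *
 *	uint8_t keys[128];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = 128,
 *		.skeydata_addr = (__u64)keys,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 */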
1371 static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1375 int srcu_idx, i, r = 0;
1377 if (args->flags != 0)
1380 /* Enforce sane limit on memory allocation */
1381 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1384 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
1388 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1389 sizeof(uint8_t) * args->count);
1395 /* Enable storage key handling for the guest */
1396 r = s390_enable_skey();
1400 down_read(&current->mm->mmap_sem);
1401 srcu_idx = srcu_read_lock(&kvm->srcu);
1402 for (i = 0; i < args->count; i++) {
1403 hva = gfn_to_hva(kvm, args->start_gfn + i);
1404 if (kvm_is_error_hva(hva)) {
1409 /* Lowest order bit is reserved */
1410 if (keys[i] & 0x01) {
1415 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
1419 srcu_read_unlock(&kvm->srcu, srcu_idx);
1420 up_read(&current->mm->mmap_sem);
1427 * Base address and length must be sent at the start of each block, therefore
1428 * it's cheaper to send some clean data, as long as it's less than the size of two longs.
1431 #define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1432 /* for consistency */
1433 #define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
1436 * This function searches for the next page with dirty CMMA attributes, and
1437 * saves the attributes in the buffer up to either the end of the buffer or
1438 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
1439 * no trailing clean bytes are saved.
1440 * In case no dirty bits were found, or if CMMA was not enabled or used, the
1441 * output buffer will indicate 0 as length.
1443 static int kvm_s390_get_cmma_bits(struct kvm *kvm,
1444 struct kvm_s390_cmma_log *args)
1446 struct kvm_s390_migration_state *s = kvm->arch.migration_state;
1447 unsigned long bufsize, hva, pgstev, i, next, cur;
1448 int srcu_idx, peek, r = 0, rr;
1451 cur = args->start_gfn;
1452 i = next = pgstev = 0;
1454 if (unlikely(!kvm->arch.use_cmma))
1456 /* Invalid/unsupported flags were specified */
1457 if (args->flags & ~KVM_S390_CMMA_PEEK)
1459 /* Migration mode query, and we are not doing a migration */
1460 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
1463 /* CMMA is disabled or was not used, or the buffer has length zero */
1464 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
1465 if (!bufsize || !kvm->mm->context.use_cmma) {
1466 memset(args, 0, sizeof(*args));
1471 /* We are not peeking, and there are no dirty pages */
1472 if (!atomic64_read(&s->dirty_pages)) {
1473 memset(args, 0, sizeof(*args));
1476 cur = find_next_bit(s->pgste_bitmap, s->bitmap_size,
1478 if (cur >= s->bitmap_size) /* nothing found, loop back */
1479 cur = find_next_bit(s->pgste_bitmap, s->bitmap_size, 0);
1480 if (cur >= s->bitmap_size) { /* again! (very unlikely) */
1481 memset(args, 0, sizeof(*args));
1484 next = find_next_bit(s->pgste_bitmap, s->bitmap_size, cur + 1);
1487 res = vmalloc(bufsize);
1491 args->start_gfn = cur;
1493 down_read(&kvm->mm->mmap_sem);
1494 srcu_idx = srcu_read_lock(&kvm->srcu);
1495 while (i < bufsize) {
1496 hva = gfn_to_hva(kvm, cur);
1497 if (kvm_is_error_hva(hva)) {
1501 /* decrement only if we actually flipped the bit to 0 */
1502 if (!peek && test_and_clear_bit(cur, s->pgste_bitmap))
1503 atomic64_dec(&s->dirty_pages);
1504 r = get_pgste(kvm->mm, hva, &pgstev);
1507 /* save the value */
1508 res[i++] = (pgstev >> 24) & 0x3;
1510 * if the next bit is too far away, stop.
1511 * if we reached the previous "next", find the next one
1514 if (next > cur + KVM_S390_MAX_BIT_DISTANCE)
1517 next = find_next_bit(s->pgste_bitmap,
1518 s->bitmap_size, cur + 1);
1519 /* reached the end of the bitmap or of the buffer, stop */
1520 if ((next >= s->bitmap_size) ||
1521 (next >= args->start_gfn + bufsize))
1526 srcu_read_unlock(&kvm->srcu, srcu_idx);
1527 up_read(&kvm->mm->mmap_sem);
1529 args->remaining = s ? atomic64_read(&s->dirty_pages) : 0;
1531 rr = copy_to_user((void __user *)args->values, res, args->count);
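/*
 * Illustrative sketch (assumption): during migration the CMMA values are
 * pulled in chunks with KVM_S390_GET_CMMA_BITS; "remaining" reports how
 * many dirty pages are still pending, so the caller loops until it drops
 * to zero (or passes KVM_S390_CMMA_PEEK for a non-destructive read).
 *
 *	__u8 values[4096];
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = next_gfn,		// hypothetical cursor
 *		.count = sizeof(values),
 *		.values = (__u64)values,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 */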
1540 * This function sets the CMMA attributes for the given pages. If the input
1541 * buffer has zero length, no action is taken, otherwise the attributes are
1542 * set and the mm->context.use_cmma flag is set.
1544 static int kvm_s390_set_cmma_bits(struct kvm *kvm,
1545 const struct kvm_s390_cmma_log *args)
1547 unsigned long hva, mask, pgstev, i;
1549 int srcu_idx, r = 0;
1553 if (!kvm->arch.use_cmma)
1555 /* invalid/unsupported flags */
1556 if (args->flags != 0)
1558 /* Enforce sane limit on memory allocation */
1559 if (args->count > KVM_S390_CMMA_SIZE_MAX)
1562 if (args->count == 0)
1565 bits = vmalloc(sizeof(*bits) * args->count);
1569 r = copy_from_user(bits, (void __user *)args->values, args->count);
1575 down_read(&kvm->mm->mmap_sem);
1576 srcu_idx = srcu_read_lock(&kvm->srcu);
1577 for (i = 0; i < args->count; i++) {
1578 hva = gfn_to_hva(kvm, args->start_gfn + i);
1579 if (kvm_is_error_hva(hva)) {
1585 pgstev = pgstev << 24;
1586 mask &= _PGSTE_GPS_USAGE_MASK;
1587 set_pgste_bits(kvm->mm, hva, mask, pgstev);
1589 srcu_read_unlock(&kvm->srcu, srcu_idx);
1590 up_read(&kvm->mm->mmap_sem);
1592 if (!kvm->mm->context.use_cmma) {
1593 down_write(&kvm->mm->mmap_sem);
1594 kvm->mm->context.use_cmma = 1;
1595 up_write(&kvm->mm->mmap_sem);
1602 long kvm_arch_vm_ioctl(struct file *filp,
1603 unsigned int ioctl, unsigned long arg)
1605 struct kvm *kvm = filp->private_data;
1606 void __user *argp = (void __user *)arg;
1607 struct kvm_device_attr attr;
1611 case KVM_S390_INTERRUPT: {
1612 struct kvm_s390_interrupt s390int;
1615 if (copy_from_user(&s390int, argp, sizeof(s390int)))
1617 r = kvm_s390_inject_vm(kvm, &s390int);
1620 case KVM_ENABLE_CAP: {
1621 struct kvm_enable_cap cap;
1623 if (copy_from_user(&cap, argp, sizeof(cap)))
1625 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
1628 case KVM_CREATE_IRQCHIP: {
1629 struct kvm_irq_routing_entry routing;
1632 if (kvm->arch.use_irqchip) {
1633 /* Set up dummy routing. */
1634 memset(&routing, 0, sizeof(routing));
1635 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
1639 case KVM_SET_DEVICE_ATTR: {
1641 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1643 r = kvm_s390_vm_set_attr(kvm, &attr);
1646 case KVM_GET_DEVICE_ATTR: {
1648 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1650 r = kvm_s390_vm_get_attr(kvm, &attr);
1653 case KVM_HAS_DEVICE_ATTR: {
1655 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1657 r = kvm_s390_vm_has_attr(kvm, &attr);
1660 case KVM_S390_GET_SKEYS: {
1661 struct kvm_s390_skeys args;
1664 if (copy_from_user(&args, argp,
1665 sizeof(struct kvm_s390_skeys)))
1667 r = kvm_s390_get_skeys(kvm, &args);
1670 case KVM_S390_SET_SKEYS: {
1671 struct kvm_s390_skeys args;
1674 if (copy_from_user(&args, argp,
1675 sizeof(struct kvm_s390_skeys)))
1677 r = kvm_s390_set_skeys(kvm, &args);
1680 case KVM_S390_GET_CMMA_BITS: {
1681 struct kvm_s390_cmma_log args;
1684 if (copy_from_user(&args, argp, sizeof(args)))
1686 r = kvm_s390_get_cmma_bits(kvm, &args);
1688 r = copy_to_user(argp, &args, sizeof(args));
1694 case KVM_S390_SET_CMMA_BITS: {
1695 struct kvm_s390_cmma_log args;
1698 if (copy_from_user(&args, argp, sizeof(args)))
1700 r = kvm_s390_set_cmma_bits(kvm, &args);
1710 static int kvm_s390_query_ap_config(u8 *config)
1712 u32 fcn_code = 0x04000000UL;
1715 memset(config, 0, 128);
1719 ".long 0xb2af0000\n" /* PQAP(QCI) */
1725 : "r" (fcn_code), "r" (config)
1726 : "cc", "0", "2", "memory"
1732 static int kvm_s390_apxa_installed(void)
1737 if (test_facility(12)) {
1738 cc = kvm_s390_query_ap_config(config);
1741 pr_err("PQAP(QCI) failed with cc=%d", cc);
1743 return config[0] & 0x40;
1749 static void kvm_s390_set_crycb_format(struct kvm *kvm)
1751 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
1753 if (kvm_s390_apxa_installed())
1754 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
1756 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
1759 static u64 kvm_s390_get_initial_cpuid(void)
1764 cpuid.version = 0xff;
1765 return *((u64 *) &cpuid);
1768 static void kvm_s390_crypto_init(struct kvm *kvm)
1770 if (!test_kvm_facility(kvm, 76))
1773 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
1774 kvm_s390_set_crycb_format(kvm);
1776 /* Enable AES/DEA protected key functions by default */
1777 kvm->arch.crypto.aes_kw = 1;
1778 kvm->arch.crypto.dea_kw = 1;
1779 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1780 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1781 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1782 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
1785 static void sca_dispose(struct kvm *kvm)
1787 if (kvm->arch.use_esca)
1788 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
1790 free_page((unsigned long)(kvm->arch.sca));
1791 kvm->arch.sca = NULL;
1794 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1796 gfp_t alloc_flags = GFP_KERNEL;
1798 char debug_name[16];
1799 static unsigned long sca_offset;
1802 #ifdef CONFIG_KVM_S390_UCONTROL
1803 if (type & ~KVM_VM_S390_UCONTROL)
1805 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1812 rc = s390_enable_sie();
1818 ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);
1820 kvm->arch.use_esca = 0; /* start with basic SCA */
1821 if (!sclp.has_64bscao)
1822 alloc_flags |= GFP_DMA;
1823 rwlock_init(&kvm->arch.sca_lock);
1824 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
1827 spin_lock(&kvm_lock);
1829 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
1831 kvm->arch.sca = (struct bsca_block *)
1832 ((char *) kvm->arch.sca + sca_offset);
1833 spin_unlock(&kvm_lock);
1835 sprintf(debug_name, "kvm-%u", current->pid);
1837 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
1841 kvm->arch.sie_page2 =
1842 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1843 if (!kvm->arch.sie_page2)
1846 /* Populate the facility mask initially. */
1847 memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
1848 sizeof(S390_lowcore.stfle_fac_list));
1849 for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
1850 if (i < kvm_s390_fac_list_mask_size())
1851 kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
1853 kvm->arch.model.fac_mask[i] = 0UL;
1856 /* Populate the facility list initially. */
1857 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
1858 memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
1859 S390_ARCH_FAC_LIST_SIZE_BYTE);
1861 set_kvm_facility(kvm->arch.model.fac_mask, 74);
1862 set_kvm_facility(kvm->arch.model.fac_list, 74);
1864 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
1865 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
1867 kvm_s390_crypto_init(kvm);
1869 mutex_init(&kvm->arch.float_int.ais_lock);
1870 kvm->arch.float_int.simm = 0;
1871 kvm->arch.float_int.nimm = 0;
1872 spin_lock_init(&kvm->arch.float_int.lock);
1873 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1874 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
1875 init_waitqueue_head(&kvm->arch.ipte_wq);
1876 mutex_init(&kvm->arch.ipte_mutex);
1878 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
1879 VM_EVENT(kvm, 3, "vm created with type %lu", type);
1881 if (type & KVM_VM_S390_UCONTROL) {
1882 kvm->arch.gmap = NULL;
1883 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
1885 if (sclp.hamax == U64_MAX)
1886 kvm->arch.mem_limit = TASK_SIZE_MAX;
1888 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
1890 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
1891 if (!kvm->arch.gmap)
1893 kvm->arch.gmap->private = kvm;
1894 kvm->arch.gmap->pfault_enabled = 0;
1897 kvm->arch.css_support = 0;
1898 kvm->arch.use_irqchip = 0;
1899 kvm->arch.epoch = 0;
1901 spin_lock_init(&kvm->arch.start_stop_lock);
1902 kvm_s390_vsie_init(kvm);
1903 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
1907 free_page((unsigned long)kvm->arch.sie_page2);
1908 debug_unregister(kvm->arch.dbf);
1910 KVM_EVENT(3, "creation of vm failed: %d", rc);
1914 bool kvm_arch_has_vcpu_debugfs(void)
1919 int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
1924 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1926 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
1927 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
1928 kvm_s390_clear_local_irqs(vcpu);
1929 kvm_clear_async_pf_completion_queue(vcpu);
1930 if (!kvm_is_ucontrol(vcpu->kvm))
1933 if (kvm_is_ucontrol(vcpu->kvm))
1934 gmap_remove(vcpu->arch.gmap);
1936 if (vcpu->kvm->arch.use_cmma)
1937 kvm_s390_vcpu_unsetup_cmma(vcpu);
1938 free_page((unsigned long)(vcpu->arch.sie_block));
1940 kvm_vcpu_uninit(vcpu);
1941 kmem_cache_free(kvm_vcpu_cache, vcpu);
1944 static void kvm_free_vcpus(struct kvm *kvm)
1947 struct kvm_vcpu *vcpu;
1949 kvm_for_each_vcpu(i, vcpu, kvm)
1950 kvm_arch_vcpu_destroy(vcpu);
1952 mutex_lock(&kvm->lock);
1953 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1954 kvm->vcpus[i] = NULL;
1956 atomic_set(&kvm->online_vcpus, 0);
1957 mutex_unlock(&kvm->lock);
1960 void kvm_arch_destroy_vm(struct kvm *kvm)
1962 kvm_free_vcpus(kvm);
1964 debug_unregister(kvm->arch.dbf);
1965 free_page((unsigned long)kvm->arch.sie_page2);
1966 if (!kvm_is_ucontrol(kvm))
1967 gmap_remove(kvm->arch.gmap);
1968 kvm_s390_destroy_adapters(kvm);
1969 kvm_s390_clear_float_irqs(kvm);
1970 kvm_s390_vsie_destroy(kvm);
1971 if (kvm->arch.migration_state) {
1972 vfree(kvm->arch.migration_state->pgste_bitmap);
1973 kfree(kvm->arch.migration_state);
1975 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
1978 /* Section: vcpu related */
1979 static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1981 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
1982 if (!vcpu->arch.gmap)
1984 vcpu->arch.gmap->private = vcpu->kvm;
1989 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
1991 if (!kvm_s390_use_sca_entries())
1993 read_lock(&vcpu->kvm->arch.sca_lock);
1994 if (vcpu->kvm->arch.use_esca) {
1995 struct esca_block *sca = vcpu->kvm->arch.sca;
1997 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
1998 sca->cpu[vcpu->vcpu_id].sda = 0;
2000 struct bsca_block *sca = vcpu->kvm->arch.sca;
2002 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
2003 sca->cpu[vcpu->vcpu_id].sda = 0;
2005 read_unlock(&vcpu->kvm->arch.sca_lock);
2008 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
2010 if (!kvm_s390_use_sca_entries()) {
2011 struct bsca_block *sca = vcpu->kvm->arch.sca;
2013 /* we still need the basic sca for the ipte control */
2014 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2015 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
2017 read_lock(&vcpu->kvm->arch.sca_lock);
2018 if (vcpu->kvm->arch.use_esca) {
2019 struct esca_block *sca = vcpu->kvm->arch.sca;
2021 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
2022 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2023 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
2024 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
2025 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
2027 struct bsca_block *sca = vcpu->kvm->arch.sca;
2029 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
2030 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2031 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
2032 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
2034 read_unlock(&vcpu->kvm->arch.sca_lock);
2037 /* Basic SCA to Extended SCA data copy routines */
2038 static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2041 d->sigp_ctrl.c = s->sigp_ctrl.c;
2042 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2045 static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2049 d->ipte_control = s->ipte_control;
2051 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2052 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2055 static int sca_switch_to_extended(struct kvm *kvm)
2057 struct bsca_block *old_sca = kvm->arch.sca;
2058 struct esca_block *new_sca;
2059 struct kvm_vcpu *vcpu;
2060 unsigned int vcpu_idx;
2063 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2067 scaoh = (u32)((u64)(new_sca) >> 32);
2068 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2070 kvm_s390_vcpu_block_all(kvm);
2071 write_lock(&kvm->arch.sca_lock);
2073 sca_copy_b_to_e(new_sca, old_sca);
2075 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2076 vcpu->arch.sie_block->scaoh = scaoh;
2077 vcpu->arch.sie_block->scaol = scaol;
2078 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
2080 kvm->arch.sca = new_sca;
2081 kvm->arch.use_esca = 1;
2083 write_unlock(&kvm->arch.sca_lock);
2084 kvm_s390_vcpu_unblock_all(kvm);
2086 free_page((unsigned long)old_sca);
2088 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2089 old_sca, kvm->arch.sca);
2093 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2097 if (!kvm_s390_use_sca_entries()) {
2098 if (id < KVM_MAX_VCPUS)
2102 if (id < KVM_S390_BSCA_CPU_SLOTS)
2104 if (!sclp.has_esca || !sclp.has_64bscao)
2107 mutex_lock(&kvm->lock);
2108 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2109 mutex_unlock(&kvm->lock);
2111 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
2114 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2116 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2117 kvm_clear_async_pf_completion_queue(vcpu);
2118 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2124 kvm_s390_set_prefix(vcpu, 0);
2125 if (test_kvm_facility(vcpu->kvm, 64))
2126 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
2127 if (test_kvm_facility(vcpu->kvm, 133))
2128 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
2129 /* fprs can be synchronized via vrs, even if the guest has no vx. With
2130 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2133 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
2135 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
2137 if (kvm_is_ucontrol(vcpu->kvm))
2138 return __kvm_ucontrol_vcpu_init(vcpu);
2143 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2144 static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2146 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
2147 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
2148 vcpu->arch.cputm_start = get_tod_clock_fast();
2149 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
2152 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2153 static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2155 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
2156 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
2157 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2158 vcpu->arch.cputm_start = 0;
2159 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
2162 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2163 static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2165 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2166 vcpu->arch.cputm_enabled = true;
2167 __start_cpu_timer_accounting(vcpu);
2170 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2171 static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2173 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2174 __stop_cpu_timer_accounting(vcpu);
2175 vcpu->arch.cputm_enabled = false;
2178 static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2180 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2181 __enable_cpu_timer_accounting(vcpu);
2185 static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2187 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2188 __disable_cpu_timer_accounting(vcpu);
2192 /* set the cpu timer - may only be called from the VCPU thread itself */
2193 void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2195 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2196 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
2197 if (vcpu->arch.cputm_enabled)
2198 vcpu->arch.cputm_start = get_tod_clock_fast();
2199 vcpu->arch.sie_block->cputm = cputm;
2200 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
2204 /* update and get the cpu timer - can also be called from other VCPU threads */
2205 __u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2210 if (unlikely(!vcpu->arch.cputm_enabled))
2211 return vcpu->arch.sie_block->cputm;
2213 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2215 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2217 * If the writer would ever execute a read in the critical
2218 * section, e.g. in irq context, we have a deadlock.
2220 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2221 value = vcpu->arch.sie_block->cputm;
2222 /* if cputm_start is 0, accounting is being started/stopped */
2223 if (likely(vcpu->arch.cputm_start))
2224 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2225 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2230 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2233 gmap_enable(vcpu->arch.enabled_gmap);
2234 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
2235 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
2236 __start_cpu_timer_accounting(vcpu);
2240 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2243 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
2244 __stop_cpu_timer_accounting(vcpu);
2245 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
2246 vcpu->arch.enabled_gmap = gmap_get_enabled();
2247 gmap_disable(vcpu->arch.enabled_gmap);
2251 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2253 /* this equals initial cpu reset in pop, but we don't switch to ESA */
2254 vcpu->arch.sie_block->gpsw.mask = 0UL;
2255 vcpu->arch.sie_block->gpsw.addr = 0UL;
2256 kvm_s390_set_prefix(vcpu, 0);
2257 kvm_s390_set_cpu_timer(vcpu, 0);
2258 vcpu->arch.sie_block->ckc = 0UL;
2259 vcpu->arch.sie_block->todpr = 0;
2260 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
2261 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
2262 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
2263 /* make sure the new fpc will be lazily loaded */
2265 current->thread.fpu.fpc = 0;
2266 vcpu->arch.sie_block->gbea = 1;
2267 vcpu->arch.sie_block->pp = 0;
2268 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2269 kvm_clear_async_pf_completion_queue(vcpu);
2270 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2271 kvm_s390_vcpu_stop(vcpu);
2272 kvm_s390_clear_local_irqs(vcpu);
2275 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
2277 mutex_lock(&vcpu->kvm->lock);
2279 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
2281 mutex_unlock(&vcpu->kvm->lock);
2282 if (!kvm_is_ucontrol(vcpu->kvm)) {
2283 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
2286 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2287 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
2288 /* make vcpu_load load the right gmap on the first trigger */
2289 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
2292 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2294 if (!test_kvm_facility(vcpu->kvm, 76))
2297 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
2299 if (vcpu->kvm->arch.crypto.aes_kw)
2300 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
2301 if (vcpu->kvm->arch.crypto.dea_kw)
2302 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
2304 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
2307 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2309 free_page(vcpu->arch.sie_block->cbrlo);
2310 vcpu->arch.sie_block->cbrlo = 0;
2313 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2315 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2316 if (!vcpu->arch.sie_block->cbrlo)
2319 vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
2323 static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2325 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2327 vcpu->arch.sie_block->ibc = model->ibc;
2328 if (test_kvm_facility(vcpu->kvm, 7))
2329 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
2332 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2336 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2340 if (test_kvm_facility(vcpu->kvm, 78))
2341 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
2342 else if (test_kvm_facility(vcpu->kvm, 8))
2343 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
2345 kvm_s390_vcpu_setup_model(vcpu);
2347 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2348 if (MACHINE_HAS_ESOP)
2349 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
2350 if (test_kvm_facility(vcpu->kvm, 9))
2351 vcpu->arch.sie_block->ecb |= ECB_SRSI;
2352 if (test_kvm_facility(vcpu->kvm, 73))
2353 vcpu->arch.sie_block->ecb |= ECB_TE;
2355 if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
2356 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
2357 if (test_kvm_facility(vcpu->kvm, 130))
2358 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2359 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
2361 vcpu->arch.sie_block->eca |= ECA_CEI;
2363 vcpu->arch.sie_block->eca |= ECA_IB;
2365 vcpu->arch.sie_block->eca |= ECA_SII;
2366 if (sclp.has_sigpif)
2367 vcpu->arch.sie_block->eca |= ECA_SIGPI;
2368 if (test_kvm_facility(vcpu->kvm, 129)) {
2369 vcpu->arch.sie_block->eca |= ECA_VX;
2370 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
2372 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
2374 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
2377 atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags);
2379 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
2381 if (vcpu->kvm->arch.use_cmma) {
2382 rc = kvm_s390_vcpu_setup_cmma(vcpu);
2386 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2387 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
2389 kvm_s390_vcpu_crypto_setup(vcpu);
2394 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
2397 struct kvm_vcpu *vcpu;
2398 struct sie_page *sie_page;
2401 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
2406 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
2410 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
2411 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
2415 vcpu->arch.sie_block = &sie_page->sie_block;
2416 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
2418 /* the real guest size will always be smaller than msl */
2419 vcpu->arch.sie_block->mso = 0;
2420 vcpu->arch.sie_block->msl = sclp.hamax;
2422 vcpu->arch.sie_block->icpua = id;
2423 spin_lock_init(&vcpu->arch.local_int.lock);
2424 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
2425 vcpu->arch.local_int.wq = &vcpu->wq;
2426 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
2427 seqcount_init(&vcpu->arch.cputm_seqcount);
2429 rc = kvm_vcpu_init(vcpu, kvm, id);
2431 goto out_free_sie_block;
2432 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
2433 vcpu->arch.sie_block);
2434 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
2438 free_page((unsigned long)(vcpu->arch.sie_block));
2440 kmem_cache_free(kvm_vcpu_cache, vcpu);
2445 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
2447 return kvm_s390_vcpu_has_irq(vcpu, 0);
2450 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
2452 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
2456 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
2458 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
2461 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
2463 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
2467 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
2469 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
2473 * Kick a guest cpu out of SIE and wait until SIE is not running.
2474 * If the CPU is not running (e.g. waiting while idle) the function will
2475 * return immediately. */
2476 void exit_sie(struct kvm_vcpu *vcpu)
2478 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
2479 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
2483 /* Kick a guest cpu out of SIE to process a request synchronously */
2484 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
2486 kvm_make_request(req, vcpu);
2487 kvm_s390_vcpu_request(vcpu);
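/*
 * Illustrative sketch (hypothetical helper, not in the original source):
 * kvm_s390_sync_request() is normally used on a single vCPU, as in
 * kvm_gmap_notifier() below. Broadcasting the same synchronous request to
 * every vCPU of a VM would look roughly like this.
 */
static void example_sync_request_all(struct kvm *kvm, int req)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		/* set the request bit and kick the vCPU out of SIE */
		kvm_s390_sync_request(req, vcpu);
}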
2490 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
2493 struct kvm *kvm = gmap->private;
2494 struct kvm_vcpu *vcpu;
2495 unsigned long prefix;
2498 if (gmap_is_shadow(gmap))
2500 if (start >= 1UL << 31)
2501 /* We are only interested in prefix pages */
2503 kvm_for_each_vcpu(i, vcpu, kvm) {
2504 /* match against both prefix pages */
2505 prefix = kvm_s390_get_prefix(vcpu);
2506 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
2507 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
2509 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
2514 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
2516 /* kvm common code refers to this, but never calls it */
2521 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
2522 struct kvm_one_reg *reg)
2527 case KVM_REG_S390_TODPR:
2528 r = put_user(vcpu->arch.sie_block->todpr,
2529 (u32 __user *)reg->addr);
2531 case KVM_REG_S390_EPOCHDIFF:
2532 r = put_user(vcpu->arch.sie_block->epoch,
2533 (u64 __user *)reg->addr);
2535 case KVM_REG_S390_CPU_TIMER:
2536 r = put_user(kvm_s390_get_cpu_timer(vcpu),
2537 (u64 __user *)reg->addr);
2539 case KVM_REG_S390_CLOCK_COMP:
2540 r = put_user(vcpu->arch.sie_block->ckc,
2541 (u64 __user *)reg->addr);
2543 case KVM_REG_S390_PFTOKEN:
2544 r = put_user(vcpu->arch.pfault_token,
2545 (u64 __user *)reg->addr);
2547 case KVM_REG_S390_PFCOMPARE:
2548 r = put_user(vcpu->arch.pfault_compare,
2549 (u64 __user *)reg->addr);
2551 case KVM_REG_S390_PFSELECT:
2552 r = put_user(vcpu->arch.pfault_select,
2553 (u64 __user *)reg->addr);
2555 case KVM_REG_S390_PP:
2556 r = put_user(vcpu->arch.sie_block->pp,
2557 (u64 __user *)reg->addr);
2559 case KVM_REG_S390_GBEA:
2560 r = put_user(vcpu->arch.sie_block->gbea,
2561 (u64 __user *)reg->addr);
2570 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2571 struct kvm_one_reg *reg)
2577 case KVM_REG_S390_TODPR:
2578 r = get_user(vcpu->arch.sie_block->todpr,
2579 (u32 __user *)reg->addr);
2581 case KVM_REG_S390_EPOCHDIFF:
2582 r = get_user(vcpu->arch.sie_block->epoch,
2583 (u64 __user *)reg->addr);
2585 case KVM_REG_S390_CPU_TIMER:
2586 r = get_user(val, (u64 __user *)reg->addr);
2588 kvm_s390_set_cpu_timer(vcpu, val);
2590 case KVM_REG_S390_CLOCK_COMP:
2591 r = get_user(vcpu->arch.sie_block->ckc,
2592 (u64 __user *)reg->addr);
2594 case KVM_REG_S390_PFTOKEN:
2595 r = get_user(vcpu->arch.pfault_token,
2596 (u64 __user *)reg->addr);
2597 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2598 kvm_clear_async_pf_completion_queue(vcpu);
2600 case KVM_REG_S390_PFCOMPARE:
2601 r = get_user(vcpu->arch.pfault_compare,
2602 (u64 __user *)reg->addr);
2604 case KVM_REG_S390_PFSELECT:
2605 r = get_user(vcpu->arch.pfault_select,
2606 (u64 __user *)reg->addr);
2608 case KVM_REG_S390_PP:
2609 r = get_user(vcpu->arch.sie_block->pp,
2610 (u64 __user *)reg->addr);
2612 case KVM_REG_S390_GBEA:
2613 r = get_user(vcpu->arch.sie_block->gbea,
2614 (u64 __user *)reg->addr);
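/*
 * Illustrative sketch (userspace side, not part of this file): the handlers
 * above back the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. Assuming an
 * open vCPU file descriptor @vcpu_fd and the <linux/kvm.h> and <sys/ioctl.h>
 * headers, reading the guest CPU timer could look like this; the helper name
 * is hypothetical and error handling is reduced to the return value.
 */
static int example_get_cpu_timer(int vcpu_fd, __u64 *cputm)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CPU_TIMER,
		.addr = (__u64)(unsigned long)cputm,
	};

	/* the kernel copies the 64-bit value to reg.addr via put_user() */
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}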
2623 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2625 kvm_s390_vcpu_initial_reset(vcpu);
2629 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2631 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
2635 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2637 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
2641 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2642 struct kvm_sregs *sregs)
2644 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
2645 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
2649 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2650 struct kvm_sregs *sregs)
2652 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
2653 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
2657 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2659 if (test_fp_ctl(fpu->fpc))
2661 vcpu->run->s.regs.fpc = fpu->fpc;
2663 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
2664 (freg_t *) fpu->fprs);
2666 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
2670 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2672 /* make sure we have the latest values */
2675 convert_vx_to_fp((freg_t *) fpu->fprs,
2676 (__vector128 *) vcpu->run->s.regs.vrs);
2678 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
2679 fpu->fpc = vcpu->run->s.regs.fpc;
2683 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2687 if (!is_vcpu_stopped(vcpu))
2690 vcpu->run->psw_mask = psw.mask;
2691 vcpu->run->psw_addr = psw.addr;
2696 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2697 struct kvm_translation *tr)
2699 return -EINVAL; /* not implemented yet */
2702 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2703 KVM_GUESTDBG_USE_HW_BP | \
2704 KVM_GUESTDBG_ENABLE)
2706 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2707 struct kvm_guest_debug *dbg)
2711 vcpu->guest_debug = 0;
2712 kvm_s390_clear_bp_data(vcpu);
2714 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
2716 if (!sclp.has_gpere)
2719 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2720 vcpu->guest_debug = dbg->control;
2721 /* enforce guest PER */
2722 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
2724 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2725 rc = kvm_s390_import_bp_data(vcpu, dbg);
2727 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
2728 vcpu->arch.guestdbg.last_bp = 0;
2732 vcpu->guest_debug = 0;
2733 kvm_s390_clear_bp_data(vcpu);
2734 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
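/*
 * Illustrative sketch (userspace side, not part of this file): enabling
 * single-stepping through KVM_SET_GUEST_DEBUG with the flags accepted by the
 * handler above. Assumes an open vCPU fd plus <linux/kvm.h>/<sys/ioctl.h>;
 * the helper name is hypothetical.
 */
static int example_enable_singlestep(int vcpu_fd)
{
	struct kvm_guest_debug dbg = {
		/* KVM_GUESTDBG_ENABLE must be set for any debugging to work */
		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
	};

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}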
2740 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2741 struct kvm_mp_state *mp_state)
2743 /* CHECK_STOP and LOAD are not supported yet */
2744 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2745 KVM_MP_STATE_OPERATING;
2748 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2749 struct kvm_mp_state *mp_state)
2753 /* user space knows about this interface - let it control the state */
2754 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2756 switch (mp_state->mp_state) {
2757 case KVM_MP_STATE_STOPPED:
2758 kvm_s390_vcpu_stop(vcpu);
2760 case KVM_MP_STATE_OPERATING:
2761 kvm_s390_vcpu_start(vcpu);
2763 case KVM_MP_STATE_LOAD:
2764 case KVM_MP_STATE_CHECK_STOP:
2765 /* fall through - CHECK_STOP and LOAD are not supported yet */
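/*
 * Illustrative sketch (userspace side, not part of this file): stopping or
 * restarting a vCPU via the mp_state interface handled above. Note that the
 * first KVM_SET_MP_STATE call also switches the VM to user-controlled CPU
 * state (user_cpu_state_ctrl). The helper name is hypothetical.
 */
static int example_set_stopped(int vcpu_fd, int stopped)
{
	struct kvm_mp_state mp_state = {
		.mp_state = stopped ? KVM_MP_STATE_STOPPED
				    : KVM_MP_STATE_OPERATING,
	};

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp_state);
}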
2773 static bool ibs_enabled(struct kvm_vcpu *vcpu)
2775 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2778 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2781 kvm_s390_vcpu_request_handled(vcpu);
2782 if (!kvm_request_pending(vcpu))
2785 * We use MMU_RELOAD just to re-arm the ipte notifier for the
2786 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
2787 * This ensures that the ipte instruction for this request has
2788 * already finished. We might race against a second unmapper that
2789 * wants to set the blocking bit. Let's just retry the request loop.
2791 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
2793 rc = gmap_mprotect_notify(vcpu->arch.gmap,
2794 kvm_s390_get_prefix(vcpu),
2795 PAGE_SIZE * 2, PROT_WRITE);
2797 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
2803 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2804 vcpu->arch.sie_block->ihcpu = 0xffff;
2808 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2809 if (!ibs_enabled(vcpu)) {
2810 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
2811 atomic_or(CPUSTAT_IBS,
2812 &vcpu->arch.sie_block->cpuflags);
2817 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2818 if (ibs_enabled(vcpu)) {
2819 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
2820 atomic_andnot(CPUSTAT_IBS,
2821 &vcpu->arch.sie_block->cpuflags);
2826 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
2827 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
2831 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
2833 * Disable CMMA virtualization; we will emulate the ESSA
2834 * instruction manually, in order to provide additional
2835 * functionalities needed for live migration.
2837 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
2841 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
2843 * Re-enable CMMA virtualization if CMMA is available and
2846 if ((vcpu->kvm->arch.use_cmma) &&
2847 (vcpu->kvm->mm->context.use_cmma))
2848 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
2852 /* nothing to do, just clear the request */
2853 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
2858 void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2860 struct kvm_vcpu *vcpu;
2863 mutex_lock(&kvm->lock);
2865 kvm->arch.epoch = tod - get_tod_clock();
2866 kvm_s390_vcpu_block_all(kvm);
2867 kvm_for_each_vcpu(i, vcpu, kvm)
2868 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2869 kvm_s390_vcpu_unblock_all(kvm);
2871 mutex_unlock(&kvm->lock);
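/*
 * Illustrative sketch (hypothetical helper, not in the original source): the
 * per-VM epoch set above is the signed delta between the guest's TOD clock
 * and the host's, so the current guest TOD can be recovered as host TOD plus
 * epoch.
 */
static inline u64 example_guest_tod(struct kvm *kvm)
{
	/* epoch was stored as: requested guest TOD - host TOD */
	return get_tod_clock() + kvm->arch.epoch;
}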
2875 * kvm_arch_fault_in_page - fault-in guest page if necessary
2876 * @vcpu: The corresponding virtual cpu
2877 * @gpa: Guest physical address
2878 * @writable: Whether the page should be writable or not
2880 * Make sure that a guest page has been faulted-in on the host.
2882 * Return: Zero on success, negative error code otherwise.
2884 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
2886 return gmap_fault(vcpu->arch.gmap, gpa,
2887 writable ? FAULT_FLAG_WRITE : 0);
2890 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2891 unsigned long token)
2893 struct kvm_s390_interrupt inti;
2894 struct kvm_s390_irq irq;
2897 irq.u.ext.ext_params2 = token;
2898 irq.type = KVM_S390_INT_PFAULT_INIT;
2899 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
2901 inti.type = KVM_S390_INT_PFAULT_DONE;
2902 inti.parm64 = token;
2903 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2907 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2908 struct kvm_async_pf *work)
2910 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2911 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2914 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2915 struct kvm_async_pf *work)
2917 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2918 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2921 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2922 struct kvm_async_pf *work)
2924 /* s390 will always inject the page directly */
2927 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2930 * s390 will always inject the page directly,
2931 * but we still want check_async_completion to clean up
2936 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2939 struct kvm_arch_async_pf arch;
2942 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2944 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2945 vcpu->arch.pfault_compare)
2947 if (psw_extint_disabled(vcpu))
2949 if (kvm_s390_vcpu_has_irq(vcpu, 0))
2951 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2953 if (!vcpu->arch.gmap->pfault_enabled)
2956 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2957 hva += current->thread.gmap_addr & ~PAGE_MASK;
2958 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
2961 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2965 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
2970 * On s390, notifications for arriving pages are delivered directly
2971 * to the guest, but the housekeeping for completed pfaults is
2972 * handled outside the worker.
2974 kvm_check_async_pf_completion(vcpu);
2976 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2977 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
2982 if (test_cpu_flag(CIF_MCCK_PENDING))
2985 if (!kvm_is_ucontrol(vcpu->kvm)) {
2986 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2991 rc = kvm_s390_handle_requests(vcpu);
2995 if (guestdbg_enabled(vcpu)) {
2996 kvm_s390_backup_guest_per_regs(vcpu);
2997 kvm_s390_patch_guest_per_regs(vcpu);
3000 vcpu->arch.sie_block->icptcode = 0;
3001 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3002 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3003 trace_kvm_s390_sie_enter(vcpu, cpuflags);
3008 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3010 struct kvm_s390_pgm_info pgm_info = {
3011 .code = PGM_ADDRESSING,
3016 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3017 trace_kvm_s390_sie_fault(vcpu);
3020 * We want to inject an addressing exception, which is defined as a
3021 * suppressing or terminating exception. However, since we came here
3022 * by a DAT access exception, the PSW still points to the faulting
3023 * instruction since DAT exceptions are nullifying. So we've got
3024 * to look up the current opcode to get the length of the instruction
3025 * to be able to forward the PSW.
3027 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
3028 ilen = insn_length(opcode);
3032 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3033 * Forward by arbitrary ilc, injection will take care of
3034 * nullification if necessary.
3036 pgm_info = vcpu->arch.pgm;
3039 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3040 kvm_s390_forward_psw(vcpu, ilen);
3041 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
3044 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3046 struct mcck_volatile_info *mcck_info;
3047 struct sie_page *sie_page;
3049 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3050 vcpu->arch.sie_block->icptcode);
3051 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3053 if (guestdbg_enabled(vcpu))
3054 kvm_s390_restore_guest_per_regs(vcpu);
3056 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3057 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
3059 if (exit_reason == -EINTR) {
3060 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3061 sie_page = container_of(vcpu->arch.sie_block,
3062 struct sie_page, sie_block);
3063 mcck_info = &sie_page->mcck_info;
3064 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3068 if (vcpu->arch.sie_block->icptcode > 0) {
3069 int rc = kvm_handle_sie_intercept(vcpu);
3071 if (rc != -EOPNOTSUPP)
3073 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3074 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3075 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3076 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3078 } else if (exit_reason != -EFAULT) {
3079 vcpu->stat.exit_null++;
3081 } else if (kvm_is_ucontrol(vcpu->kvm)) {
3082 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3083 vcpu->run->s390_ucontrol.trans_exc_code =
3084 current->thread.gmap_addr;
3085 vcpu->run->s390_ucontrol.pgm_code = 0x10;
3087 } else if (current->thread.gmap_pfault) {
3088 trace_kvm_s390_major_guest_pfault(vcpu);
3089 current->thread.gmap_pfault = 0;
3090 if (kvm_arch_setup_async_pf(vcpu))
3092 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
3094 return vcpu_post_run_fault_in_sie(vcpu);
3097 static int __vcpu_run(struct kvm_vcpu *vcpu)
3099 int rc, exit_reason;
3102 * We try to hold kvm->srcu during most of vcpu_run (except when run-
3103 * ning the guest), so that memslots (and other stuff) are protected
3105 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3108 rc = vcpu_pre_run(vcpu);
3112 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
3114 * As PF_VCPU will be used in the fault handler, there should be
3115 * no uaccess between guest_enter and guest_exit.
3117 local_irq_disable();
3118 guest_enter_irqoff();
3119 __disable_cpu_timer_accounting(vcpu);
3121 exit_reason = sie64a(vcpu->arch.sie_block,
3122 vcpu->run->s.regs.gprs);
3123 local_irq_disable();
3124 __enable_cpu_timer_accounting(vcpu);
3125 guest_exit_irqoff();
3127 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3129 rc = vcpu_post_run(vcpu, exit_reason);
3130 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3132 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
3136 static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3138 struct runtime_instr_cb *riccb;
3141 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
3142 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
3143 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3144 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3145 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3146 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3147 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3148 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
3149 /* some control register changes require a tlb flush */
3150 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3152 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
3153 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
3154 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3155 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3156 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3157 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3159 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3160 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3161 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3162 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
3163 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3164 kvm_clear_async_pf_completion_queue(vcpu);
3167 * If userspace sets the riccb (e.g. after migration) to a valid state,
3168 * we should enable RI here instead of doing the lazy enablement.
3170 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
3171 test_kvm_facility(vcpu->kvm, 64) &&
3173 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
3174 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
3175 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
3178 * If userspace sets the gscb (e.g. after migration) to non-zero,
3179 * we should enable GS here instead of doing the lazy enablement.
3181 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3182 test_kvm_facility(vcpu->kvm, 133) &&
3184 !vcpu->arch.gs_enabled) {
3185 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3186 vcpu->arch.sie_block->ecb |= ECB_GS;
3187 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3188 vcpu->arch.gs_enabled = 1;
3190 save_access_regs(vcpu->arch.host_acrs);
3191 restore_access_regs(vcpu->run->s.regs.acrs);
3192 /* save host (userspace) fprs/vrs */
3194 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3195 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3197 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3199 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3200 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3201 if (test_fp_ctl(current->thread.fpu.fpc))
3202 /* User space provided an invalid FPC, let's clear it */
3203 current->thread.fpu.fpc = 0;
3204 if (MACHINE_HAS_GS) {
3206 __ctl_set_bit(2, 4);
3207 if (current->thread.gs_cb) {
3208 vcpu->arch.host_gscb = current->thread.gs_cb;
3209 save_gs_cb(vcpu->arch.host_gscb);
3211 if (vcpu->arch.gs_enabled) {
3212 current->thread.gs_cb = (struct gs_cb *)
3213 &vcpu->run->s.regs.gscb;
3214 restore_gs_cb(current->thread.gs_cb);
3219 kvm_run->kvm_dirty_regs = 0;
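/*
 * Illustrative sketch (userspace side, not part of this file): sync_regs()
 * above only consumes fields whose bit is set in kvm_run->kvm_dirty_regs.
 * Assuming @run points to the mmap()ed kvm_run structure of a vCPU, updating
 * the guest prefix before the next KVM_RUN could look like this; the helper
 * name is hypothetical.
 */
static void example_set_prefix(struct kvm_run *run, __u64 new_prefix)
{
	run->s.regs.prefix = new_prefix;
	/* tell sync_regs() that the prefix field carries a valid value */
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
}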
3222 static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3224 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3225 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3226 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3227 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
3228 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
3229 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3230 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3231 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3232 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3233 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3234 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3235 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
3236 save_access_regs(vcpu->run->s.regs.acrs);
3237 restore_access_regs(vcpu->arch.host_acrs);
3238 /* Save guest register state */
3240 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3241 /* Restore will be done lazily at return */
3242 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3243 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
3244 if (MACHINE_HAS_GS) {
3245 __ctl_set_bit(2, 4);
3246 if (vcpu->arch.gs_enabled)
3247 save_gs_cb(current->thread.gs_cb);
3249 current->thread.gs_cb = vcpu->arch.host_gscb;
3250 restore_gs_cb(vcpu->arch.host_gscb);
3252 if (!vcpu->arch.host_gscb)
3253 __ctl_clear_bit(2, 4);
3254 vcpu->arch.host_gscb = NULL;
3259 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3264 if (kvm_run->immediate_exit)
3267 if (guestdbg_exit_pending(vcpu)) {
3268 kvm_s390_prepare_debug_exit(vcpu);
3272 if (vcpu->sigset_active)
3273 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3275 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
3276 kvm_s390_vcpu_start(vcpu);
3277 } else if (is_vcpu_stopped(vcpu)) {
3278 pr_err_ratelimited("can't run stopped vcpu %d\n",
3283 sync_regs(vcpu, kvm_run);
3284 enable_cpu_timer_accounting(vcpu);
3287 rc = __vcpu_run(vcpu);
3289 if (signal_pending(current) && !rc) {
3290 kvm_run->exit_reason = KVM_EXIT_INTR;
3294 if (guestdbg_exit_pending(vcpu) && !rc) {
3295 kvm_s390_prepare_debug_exit(vcpu);
3299 if (rc == -EREMOTE) {
3300 /* userspace support is needed, kvm_run has been prepared */
3304 disable_cpu_timer_accounting(vcpu);
3305 store_regs(vcpu, kvm_run);
3307 if (vcpu->sigset_active)
3308 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3310 vcpu->stat.exit_userspace++;
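/*
 * Illustrative sketch (userspace side, not part of this file): a minimal run
 * loop around the ioctl implemented above. Assumes @vcpu_fd is an open vCPU
 * fd and @run is its kvm_run structure, obtained by mmap()ing the fd with the
 * size reported by KVM_GET_VCPU_MMAP_SIZE; also assumes <linux/kvm.h>,
 * <sys/ioctl.h> and <errno.h>. The helper name is hypothetical and most exit
 * reasons are left unhandled.
 */
static int example_run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
			if (errno == EINTR)
				continue;	/* interrupted by a signal */
			return -1;
		}
		switch (run->exit_reason) {
		case KVM_EXIT_S390_SIEIC:
			/* unhandled SIE intercept, see vcpu_post_run() */
			return 0;
		default:
			return 0;
		}
	}
}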
3315 * store status at address
3316 * we have two special cases:
3317 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
3318 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
3320 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
3322 unsigned char archmode = 1;
3323 freg_t fprs[NUM_FPRS];
3328 px = kvm_s390_get_prefix(vcpu);
3329 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
3330 if (write_guest_abs(vcpu, 163, &archmode, 1))
3333 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
3334 if (write_guest_real(vcpu, 163, &archmode, 1))
3338 gpa -= __LC_FPREGS_SAVE_AREA;
3340 /* manually convert vector registers if necessary */
3341 if (MACHINE_HAS_VX) {
3342 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
3343 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
3346 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
3347 vcpu->run->s.regs.fprs, 128);
3349 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
3350 vcpu->run->s.regs.gprs, 128);
3351 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
3352 &vcpu->arch.sie_block->gpsw, 16);
3353 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
3355 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
3356 &vcpu->run->s.regs.fpc, 4);
3357 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
3358 &vcpu->arch.sie_block->todpr, 4);
3359 cputm = kvm_s390_get_cpu_timer(vcpu);
3360 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
3362 clkcomp = vcpu->arch.sie_block->ckc >> 8;
3363 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
3365 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
3366 &vcpu->run->s.regs.acrs, 64);
3367 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
3368 &vcpu->arch.sie_block->gcr, 128);
3369 return rc ? -EFAULT : 0;
3372 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
3375 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
3376 * switch in the run ioctl. Let's update our copies before we save
3377 * them into the save area.
3380 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3381 save_access_regs(vcpu->run->s.regs.acrs);
3383 return kvm_s390_store_status_unloaded(vcpu, addr);
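/*
 * Illustrative sketch (userspace side, not part of this file): triggering a
 * store status via the KVM_S390_STORE_STATUS vCPU ioctl backed by the
 * functions above. A plain guest absolute address of the save area is shown
 * here; the NOADDR/PREFIXED special cases are handled in
 * kvm_s390_store_status_unloaded(). Assumes <linux/kvm.h>/<sys/ioctl.h>; the
 * helper name is hypothetical.
 */
static int example_store_status(int vcpu_fd, unsigned long guest_addr)
{
	return ioctl(vcpu_fd, KVM_S390_STORE_STATUS, guest_addr);
}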
3386 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3388 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
3389 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
3392 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
3395 struct kvm_vcpu *vcpu;
3397 kvm_for_each_vcpu(i, vcpu, kvm) {
3398 __disable_ibs_on_vcpu(vcpu);
3402 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3406 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
3407 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
3410 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
3412 int i, online_vcpus, started_vcpus = 0;
3414 if (!is_vcpu_stopped(vcpu))
3417 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
3418 /* Only one cpu at a time may enter/leave the STOPPED state. */
3419 spin_lock(&vcpu->kvm->arch.start_stop_lock);
3420 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3422 for (i = 0; i < online_vcpus; i++) {
3423 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
3427 if (started_vcpus == 0) {
3428 /* we're the only active VCPU -> speed it up */
3429 __enable_ibs_on_vcpu(vcpu);
3430 } else if (started_vcpus == 1) {
3432 * As we are starting a second VCPU, we have to disable
3433 * the IBS facility on all VCPUs to remove potentially
3434 * outstanding ENABLE requests.
3436 __disable_ibs_on_all_vcpus(vcpu->kvm);
3439 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
3441 * Another VCPU might have used IBS while we were offline.
3442 * Let's play safe and flush the VCPU at startup.
3444 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3445 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
3449 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
3451 int i, online_vcpus, started_vcpus = 0;
3452 struct kvm_vcpu *started_vcpu = NULL;
3454 if (is_vcpu_stopped(vcpu))
3457 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
3458 /* Only one cpu at a time may enter/leave the STOPPED state. */
3459 spin_lock(&vcpu->kvm->arch.start_stop_lock);
3460 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3462 /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
3463 kvm_s390_clear_stop_irq(vcpu);
3465 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
3466 __disable_ibs_on_vcpu(vcpu);
3468 for (i = 0; i < online_vcpus; i++) {
3469 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
3471 started_vcpu = vcpu->kvm->vcpus[i];
3475 if (started_vcpus == 1) {
3477 * As we only have one VCPU left, we want to enable the
3478 * IBS facility for that VCPU to speed it up.
3480 __enable_ibs_on_vcpu(started_vcpu);
3483 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
3487 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3488 struct kvm_enable_cap *cap)
3496 case KVM_CAP_S390_CSS_SUPPORT:
3497 if (!vcpu->kvm->arch.css_support) {
3498 vcpu->kvm->arch.css_support = 1;
3499 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
3500 trace_kvm_s390_enable_css(vcpu->kvm);
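/*
 * Illustrative sketch (userspace side, not part of this file): enabling
 * channel-subsystem support through the vCPU KVM_ENABLE_CAP handler above.
 * Assumes <linux/kvm.h>/<sys/ioctl.h>; the helper name is hypothetical.
 */
static int example_enable_css(int vcpu_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_S390_CSS_SUPPORT,
	};

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}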
3511 static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
3512 struct kvm_s390_mem_op *mop)
3514 void __user *uaddr = (void __user *)mop->buf;
3515 void *tmpbuf = NULL;
3517 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
3518 | KVM_S390_MEMOP_F_CHECK_ONLY;
3520 if (mop->flags & ~supported_flags)
3523 if (mop->size > MEM_OP_MAX_SIZE)
3526 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
3527 tmpbuf = vmalloc(mop->size);
3532 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3535 case KVM_S390_MEMOP_LOGICAL_READ:
3536 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
3537 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3538 mop->size, GACC_FETCH);
3541 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3543 if (copy_to_user(uaddr, tmpbuf, mop->size))
3547 case KVM_S390_MEMOP_LOGICAL_WRITE:
3548 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
3549 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3550 mop->size, GACC_STORE);
3553 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
3557 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3563 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
3565 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
3566 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
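/*
 * Illustrative sketch (userspace side, not part of this file): reading guest
 * memory through the KVM_S390_MEM_OP handler above. Assumes an open vCPU fd
 * and <linux/kvm.h>/<sys/ioctl.h>; the helper name is hypothetical and the
 * access register defaults to 0.
 */
static int example_read_guest(int vcpu_fd, __u64 gaddr, void *buf, __u32 size)
{
	struct kvm_s390_mem_op op = {
		.gaddr = gaddr,
		.size  = size,
		.op    = KVM_S390_MEMOP_LOGICAL_READ,
		.buf   = (__u64)(unsigned long)buf,
		.ar    = 0,
	};

	/* 0 on success; > 0 indicates a guest access exception */
	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}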
3572 long kvm_arch_vcpu_ioctl(struct file *filp,
3573 unsigned int ioctl, unsigned long arg)
3575 struct kvm_vcpu *vcpu = filp->private_data;
3576 void __user *argp = (void __user *)arg;
3581 case KVM_S390_IRQ: {
3582 struct kvm_s390_irq s390irq;
3585 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
3587 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
3590 case KVM_S390_INTERRUPT: {
3591 struct kvm_s390_interrupt s390int;
3592 struct kvm_s390_irq s390irq;
3595 if (copy_from_user(&s390int, argp, sizeof(s390int)))
3597 if (s390int_to_s390irq(&s390int, &s390irq))
3599 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
3602 case KVM_S390_STORE_STATUS:
3603 idx = srcu_read_lock(&vcpu->kvm->srcu);
3604 r = kvm_s390_vcpu_store_status(vcpu, arg);
3605 srcu_read_unlock(&vcpu->kvm->srcu, idx);
3607 case KVM_S390_SET_INITIAL_PSW: {
3611 if (copy_from_user(&psw, argp, sizeof(psw)))
3613 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
3616 case KVM_S390_INITIAL_RESET:
3617 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3619 case KVM_SET_ONE_REG:
3620 case KVM_GET_ONE_REG: {
3621 struct kvm_one_reg reg;
3623 if (copy_from_user(&reg, argp, sizeof(reg)))
3625 if (ioctl == KVM_SET_ONE_REG)
3626 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
3628 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
3631 #ifdef CONFIG_KVM_S390_UCONTROL
3632 case KVM_S390_UCAS_MAP: {
3633 struct kvm_s390_ucas_mapping ucasmap;
3635 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3640 if (!kvm_is_ucontrol(vcpu->kvm)) {
3645 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3646 ucasmap.vcpu_addr, ucasmap.length);
3649 case KVM_S390_UCAS_UNMAP: {
3650 struct kvm_s390_ucas_mapping ucasmap;
3652 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3657 if (!kvm_is_ucontrol(vcpu->kvm)) {
3662 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3667 case KVM_S390_VCPU_FAULT: {
3668 r = gmap_fault(vcpu->arch.gmap, arg, 0);
3671 case KVM_ENABLE_CAP:
3673 struct kvm_enable_cap cap;
3675 if (copy_from_user(&cap, argp, sizeof(cap)))
3677 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3680 case KVM_S390_MEM_OP: {
3681 struct kvm_s390_mem_op mem_op;
3683 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3684 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3689 case KVM_S390_SET_IRQ_STATE: {
3690 struct kvm_s390_irq_state irq_state;
3693 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3695 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3696 irq_state.len == 0 ||
3697 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3701 r = kvm_s390_set_irq_state(vcpu,
3702 (void __user *) irq_state.buf,
3706 case KVM_S390_GET_IRQ_STATE: {
3707 struct kvm_s390_irq_state irq_state;
3710 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3712 if (irq_state.len == 0) {
3716 r = kvm_s390_get_irq_state(vcpu,
3717 (__u8 __user *) irq_state.buf,
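/*
 * Illustrative sketch (userspace side, not part of this file): injecting a
 * restart interrupt through the KVM_S390_IRQ case handled above. Assumes
 * <linux/kvm.h>/<sys/ioctl.h>; the helper name is hypothetical.
 */
static int example_inject_restart(int vcpu_fd)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_RESTART,
	};

	return ioctl(vcpu_fd, KVM_S390_IRQ, &irq);
}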
3727 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3729 #ifdef CONFIG_KVM_S390_UCONTROL
3730 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
3731 && (kvm_is_ucontrol(vcpu->kvm))) {
3732 vmf->page = virt_to_page(vcpu->arch.sie_block);
3733 get_page(vmf->page);
3737 return VM_FAULT_SIGBUS;
3740 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3741 unsigned long npages)
3746 /* Section: memory related */
3747 int kvm_arch_prepare_memory_region(struct kvm *kvm,
3748 struct kvm_memory_slot *memslot,
3749 const struct kvm_userspace_memory_region *mem,
3750 enum kvm_mr_change change)
3752 /* A few sanity checks. Memory slots have to start and end at a
3753 segment boundary (1MB). The memory in userland may be fragmented
3754 into various different vmas. It is okay to mmap() and munmap()
3755 stuff in this slot at any time after doing this call. */
3757 if (mem->userspace_addr & 0xffffful)
3760 if (mem->memory_size & 0xffffful)
3763 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3769 void kvm_arch_commit_memory_region(struct kvm *kvm,
3770 const struct kvm_userspace_memory_region *mem,
3771 const struct kvm_memory_slot *old,
3772 const struct kvm_memory_slot *new,
3773 enum kvm_mr_change change)
3777 /* If the basics of the memslot do not change, we do not want
3778 * to update the gmap. Every update causes several unnecessary
3779 * segment translation exceptions. This is usually handled just
3780 * fine by the normal fault handler + gmap, but it will also
3781 * cause faults on the prefix page of running guest CPUs.
3783 if (old->userspace_addr == mem->userspace_addr &&
3784 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3785 old->npages * PAGE_SIZE == mem->memory_size)
3788 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3789 mem->guest_phys_addr, mem->memory_size);
3791 pr_warn("failed to commit memory region\n");
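/*
 * Illustrative sketch (userspace side, not part of this file): registering a
 * memslot that satisfies the alignment rules checked in
 * kvm_arch_prepare_memory_region() above. Assumes an open VM fd and
 * <linux/kvm.h>/<sys/ioctl.h>; the helper name is hypothetical.
 */
static int example_add_memslot(int vm_fd, __u32 slot, void *host_mem,
			       __u64 guest_phys, __u64 size)
{
	struct kvm_userspace_memory_region region = {
		.slot            = slot,
		.guest_phys_addr = guest_phys,
		.memory_size     = size,	/* must be a 1MB multiple */
		/* host address must be 1MB aligned */
		.userspace_addr  = (__u64)(unsigned long)host_mem,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}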
3795 static inline unsigned long nonhyp_mask(int i)
3797 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3799 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3802 void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3804 vcpu->valid_wakeup = false;
3807 static int __init kvm_s390_init(void)
3811 if (!sclp.has_sief2) {
3812 pr_info("SIE not available\n");
3816 for (i = 0; i < 16; i++)
3817 kvm_s390_fac_list_mask[i] |=
3818 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3820 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
3823 static void __exit kvm_s390_exit(void)
3828 module_init(kvm_s390_init);
3829 module_exit(kvm_s390_exit);
3832 * Enable autoloading of the kvm module.
3833 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3834 * since x86 takes a different approach.
3836 #include <linux/miscdevice.h>
3837 MODULE_ALIAS_MISCDEV(KVM_MINOR);
3838 MODULE_ALIAS("devname:kvm");