// SPDX-License-Identifier: GPL-2.0
/*
 * Hosting Protected Virtual Machines
 *
 * Copyright IBM Corp. 2019, 2020
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <asm/gmap.h>
#include <asm/uv.h>
#include <asm/mman.h>
#include "kvm-s390.h"
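
/*
 * Tear down the Ultravisor part of a protected vCPU: ask the Ultravisor to
 * destroy the secure CPU, free the CPU base storage that was donated for it
 * and the SIDA page, and clear the PV related fields in the SIE control
 * block so that the block can be reused for a non-protected guest.
 */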
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	int cc;

	if (!kvm_s390_pv_cpu_get_handle(vcpu))
		return 0;

	cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), UVC_CMD_DESTROY_SEC_CPU, rc, rrc);

	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
		     vcpu->vcpu_id, *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", *rc, *rrc);

	/* Intended memory leak for something that should never happen. */
	if (!cc)
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));

	free_page(sida_origin(vcpu->arch.sie_block));
	vcpu->arch.sie_block->pv_handle_cpu = 0;
	vcpu->arch.sie_block->pv_handle_config = 0;
	memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
	vcpu->arch.sie_block->sdf = 0;
	/*
	 * The sidad field (for sdf == 2) is now the gbea field (for sdf == 0).
	 * Use the reset value of gbea to avoid leaking the kernel pointer of
	 * the just freed sida.
	 */
	vcpu->arch.sie_block->gbea = 1;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

	return cc ? -EIO : 0;
}
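
/*
 * Create the Ultravisor view of a vCPU: allocate the CPU base storage that
 * is donated to the Ultravisor and the Secure Instruction Data Area, issue
 * the Create Secure CPU UV call and, on success, store the returned CPU
 * handle in the vCPU and in its SIE control block.
 */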
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	struct uv_cb_csc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CPU,
		.header.len = sizeof(uvcb),
	};
	int cc;

	if (kvm_s390_pv_cpu_get_handle(vcpu))
		return -EINVAL;

	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
						   get_order(uv_info.guest_cpu_stor_len));
	if (!vcpu->arch.pv.stor_base)
		return -ENOMEM;

	/* Input */
	uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
	uvcb.num = vcpu->arch.sie_block->icpua;
	uvcb.state_origin = (u64)vcpu->arch.sie_block;
	uvcb.stor_origin = (u64)vcpu->arch.pv.stor_base;

	/* Alloc Secure Instruction Data Area Designation */
	vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!vcpu->arch.sie_block->sidad) {
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));
		return -ENOMEM;
	}

	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(vcpu->kvm, 3,
		     "PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
		     vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
		     uvcb.header.rrc);

	if (cc) {
		u16 dummy;

		kvm_s390_pv_destroy_cpu(vcpu, &dummy, &dummy);
		return -EIO;
	}

	/* Output */
	vcpu->arch.pv.handle = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
	vcpu->arch.sie_block->sdf = 2;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}
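
/*
 * The two helpers below manage the memory a protected VM donates to the
 * Ultravisor: the base configuration storage (a fixed-size, physically
 * contiguous buffer) and the variable configuration storage, whose size
 * scales with the guest memory size.
 */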
/* only free resources when the destroy was successful */
static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
{
	vfree(kvm->arch.pv.stor_var);
	free_pages(kvm->arch.pv.stor_base,
		   get_order(uv_info.guest_base_stor_len));
	memset(&kvm->arch.pv, 0, sizeof(kvm->arch.pv));
}
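
/*
 * Size and allocate both donated areas. The guest size is derived from the
 * memslot layout under slots_lock; the first entry of the memslot array
 * covers the highest guest frame number, so base_gfn + npages is the
 * current guest memory limit.
 */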
static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
{
	unsigned long base = uv_info.guest_base_stor_len;
	unsigned long virt = uv_info.guest_virt_var_stor_len;
	unsigned long npages = 0, vlen = 0;
	struct kvm_memory_slot *memslot;

	kvm->arch.pv.stor_var = NULL;
	kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
	if (!kvm->arch.pv.stor_base)
		return -ENOMEM;

	/*
	 * Calculate current guest storage for allocation of the
	 * variable storage, which is based on the length in MB.
	 *
	 * Slots are sorted by GFN
	 */
	mutex_lock(&kvm->slots_lock);
	memslot = kvm_memslots(kvm)->memslots;
	npages = memslot->base_gfn + memslot->npages;
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.pv.guest_len = npages * PAGE_SIZE;

	/* Allocate variable storage */
	vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
	vlen += uv_info.guest_virt_base_stor_len;
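	/*
	 * Illustrative sizing only (the real per-segment and base lengths are
	 * reported by the Ultravisor in uv_info): for a 4 GiB guest and a
	 * hypothetical guest_virt_var_stor_len of 512 bytes per 1 MiB
	 * (HPAGE_SIZE) segment, this yields ALIGN(512 * 4096, PAGE_SIZE) =
	 * 2 MiB plus guest_virt_base_stor_len.
	 */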
	/*
	 * The Create Secure Configuration Ultravisor Call does not support
	 * using large pages for the virtual memory area.
	 * This is a hardware limitation.
	 */
	kvm->arch.pv.stor_var = vmalloc_no_huge(vlen);
	if (!kvm->arch.pv.stor_var)
		goto out_err;
	return 0;

out_err:
	kvm_s390_pv_dealloc_vm(kvm);
	return -ENOMEM;
}

/* this should not fail, but if it does, we must not free the donated memory */
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	int cc;

	/* make all pages accessible before destroying the guest */
	s390_reset_acc(kvm->mm);

	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	atomic_set(&kvm->mm->context.is_protected, 0);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);
	/* Intended memory leak on "impossible" error */
	if (!cc)
		kvm_s390_pv_dealloc_vm(kvm);
	return cc ? -EIO : 0;
}
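
/*
 * Create the protected VM in the Ultravisor: allocate the donated memory,
 * fill the Create Secure Configuration control block with the guest ASCE,
 * the SCA and the two donated storage areas, and issue the UV call. On
 * failure the donated memory is freed again, or torn down through
 * kvm_s390_pv_deinit_vm() if the Ultravisor may still be using it
 * (UVC_RC_NEED_DESTROY).
 */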
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_cgc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CONF,
		.header.len = sizeof(uvcb)
	};
	int cc, ret;
	u16 dummy;

	ret = kvm_s390_pv_alloc_vm(kvm);
	if (ret)
		return ret;

	/* Inputs */
	uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
	uvcb.guest_stor_len = kvm->arch.pv.guest_len;
	uvcb.guest_asce = kvm->arch.gmap->asce;
	uvcb.guest_sca = (unsigned long)kvm->arch.sca;
	uvcb.conf_base_stor_origin = (u64)kvm->arch.pv.stor_base;
	uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;

	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x",
		     uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc);

	/* Outputs */
	kvm->arch.pv.handle = uvcb.guest_handle;

	if (cc) {
		if (uvcb.header.rc & UVC_RC_NEED_DESTROY)
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
		else
			kvm_s390_pv_dealloc_vm(kvm);
		return -EIO;
	}
	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
	return 0;
}
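
/*
 * Hand the SE header of the encrypted guest image over to the Ultravisor
 * via the Set Secure Configuration Parameters UV call. This has to happen
 * before the image can be unpacked into guest memory.
 */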
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc)
{
	struct uv_cb_ssc uvcb = {
		.header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
		.header.len = sizeof(uvcb),
		.sec_header_origin = (u64)hdr,
		.sec_header_len = length,
		.guest_handle = kvm_s390_pv_get_handle(kvm),
	};
	int cc = uv_call(0, (u64)&uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
		     *rc, *rrc);
	if (!cc)
		atomic_set(&kvm->mm->context.is_protected, 1);
	return cc ? -EINVAL : 0;
}
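
/*
 * Unpack one page of the encrypted guest image: gmap_make_secure() makes
 * the page at @addr secure and issues the Unpack UV call with the image
 * tweak and the offset of the page within the image.
 */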
static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
		      u64 offset, u16 *rc, u16 *rrc)
{
	struct uv_cb_unp uvcb = {
		.header.cmd = UVC_CMD_UNPACK_IMG,
		.header.len = sizeof(uvcb),
		.guest_handle = kvm_s390_pv_get_handle(kvm),
		.gaddr = addr,
		.tweak[0] = tweak,
		.tweak[1] = offset,
	};
	int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;

	if (ret && ret != -EAGAIN)
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
			     uvcb.gaddr, *rc, *rrc);
	return ret;
}
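
/*
 * Unpack the encrypted guest image page by page. -EAGAIN from unpack_one()
 * means the page could not be made secure right now and is retried, unless
 * a fatal signal is pending; any other error aborts the unpack.
 */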
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc)
{
	u64 offset = 0;
	int ret = 0;

	if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
		return -EINVAL;

	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
		     addr, size);

	while (offset < size) {
		ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
		if (ret == -EAGAIN) {
			cond_resched();
			if (fatal_signal_pending(current))
				break;
			continue;
		}
		if (ret)
			break;
		addr += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	if (!ret)
		KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
	return ret;
}
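
/*
 * The Ultravisor tracks the CPU state of a protected vCPU, so state changes
 * (e.g. stopped or operating) are requested through the CPU Set State UV
 * call instead of being written to the SIE control block directly.
 */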
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
{
	struct uv_cb_cpu_set_state uvcb = {
		.header.cmd = UVC_CMD_CPU_SET_STATE,
		.header.len = sizeof(uvcb),
		.cpu_handle = kvm_s390_pv_cpu_get_handle(vcpu),
		.state = state,
	};
	int cc;

	cc = uv_call(0, (u64)&uvcb);
	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
		     vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
	if (cc)
		return -EINVAL;
	return 0;
}