// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

struct uv_info __bootdata_preserved(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);
EXPORT_SYMBOL(uv_info);

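/*
 * Issue the Init Ultravisor UVC, donating stor_len bytes starting at
 * stor_base to the Ultravisor as its base storage.
 */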
static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}

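/*
 * Allocate and donate the base storage that the Ultravisor needs to keep
 * track of secure guests. On any failure, support for protected
 * virtualization is switched off.
 */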
void __init setup_uv(void)
{
	void *uv_stor_base;

	if (!is_prot_virt_host())
		return;

	uv_stor_base = memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
static int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy_page(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb)) {
		/*
		 * Older firmware uses 107/d as an indication of a non secure
		 * page. Let us emulate the newer variant (no-op).
		 */
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_destroy_owned_page(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_destroy_page(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * The caller must already hold a reference to the page
 */
int uv_convert_owned_from_secure(unsigned long paddr)
{
	struct page *page = phys_to_page(paddr);
	int rc;

	get_page(page);
	rc = uv_convert_from_secure(paddr);
	if (!rc)
		clear_bit(PG_arch_1, &page->flags);
	put_page(page);
	return rc;
}

/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * page can not be a huge page for example.
 */
static int expected_page_refs(struct page *page)
{
	int res;

	res = page_mapcount(page);
	if (PageSwapCache(page)) {
		res++;
	} else if (page_mapping(page)) {
		res++;
		if (page_has_private(page))
			res++;
	}
	return res;
}

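/*
 * Attempt to make the given page secure by issuing the import UVC described
 * by uvcb exactly once, with the page refcount frozen at its expected value.
 * Returns 0 on success, -EBUSY if the refcount could not be frozen, -EAGAIN
 * if the caller should retry, -ENXIO if the page was not mapped, and
 * -EINVAL on other errors.
 */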
static int make_page_secure(struct page *page, struct uv_cb_header *uvcb)
{
	int expected, cc = 0;

	if (PageWriteback(page))
		return -EAGAIN;
	expected = expected_page_refs(page);
	if (!page_ref_freeze(page, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &page->flags);
	/*
	 * If the UVC does not succeed or fail immediately, we don't want to
	 * loop for long, or we might get stall notifications.
	 * On the other hand, this is a complex scenario and we are holding a
	 * lot of locks, so we can't easily sleep and reschedule. We try only
	 * once, and if the UVC returned busy or partial completion, we
	 * return -EAGAIN and we let the callers deal with it.
	 */
	cc = __uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
	/*
	 * Return -ENXIO if the page was not mapped, -EINVAL for other errors.
	 * If busy or partially completed, return -EAGAIN.
	 */
	if (cc == UVC_CC_OK)
		return 0;
	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
		return -EAGAIN;
	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}

/**
 * should_export_before_import - Determine whether an export is needed
 *				 before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although considered as one, the Unpin Page UVC is not an actual import,
 * so it is not affected.
 *
 * No export is needed also when there is only one protected VM, because the
 * page cannot belong to the wrong VM in that case (there is no "other VM"
 * it can belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
	/*
	 * The misc feature indicates, among other things, that importing a
	 * shared page from a different protected VM will automatically also
	 * transfer its ownership.
	 */
	if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications))
		return false;
	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
		return false;
	return atomic_read(&mm->context.protected_count) > 1;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it's brought in the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct page *page;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest is thus never reaching secure mode. If
	 * userspace plays dirty tricks with mapping huge pages later
	 * on, this will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
		page = pte_page(*ptep);
		rc = -EAGAIN;
		if (trylock_page(page)) {
			if (should_export_before_import(uvcb, gmap->mm))
				uv_convert_from_secure(page_to_phys(page));
			rc = make_page_secure(page, uvcb);
			unlock_page(page);
		}
	}
	pte_unmap_unlock(ptep, ptelock);
out:
	mmap_read_unlock(gmap->mm);

	if (rc == -EAGAIN) {
		/*
		 * If we are here because the UVC returned busy or partial
		 * completion, this is just a useless check, but it is safe.
		 */
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the page refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the page refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);

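/*
 * Convert a guest page to secure by issuing the Convert to Secure UVC
 * through the gmap_make_secure() machinery above.
 */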
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);

/**
 * gmap_destroy_page - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
	struct vm_area_struct *vma;
	unsigned long uaddr;
	struct page *page;
	int rc;

	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Huge pages should not be able to become secure
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = 0;
	/* we take an extra reference here */
	page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	rc = uv_destroy_owned_page(page_to_phys(page));
	/*
	 * Fault handlers can race; it is possible that two CPUs will fault
	 * on the same secure page. One CPU can destroy the page, reboot,
	 * re-enter secure mode and import it, while the second CPU was
	 * stuck at the beginning of the handler. At some point the second
	 * CPU will be able to progress, and it will not be able to destroy
	 * the page. In that case we do not want to terminate the process,
	 * we instead try to export the page.
	 */
	if (rc)
		rc = uv_convert_owned_from_secure(page_to_phys(page));
	put_page(page);
out:
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_destroy_page);

/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having 2
 * parallel make_page_accessible is fine, as the UV calls will become a
 * no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepage cannot be protected, so nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. As an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling a UVC.
	 *
	 * As secure pages are never huge, all 3 variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);

#endif

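/*
 * Expose the results of the Query Ultravisor Info UVC to userspace via
 * sysfs, under /sys/firmware/uv/.
 */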
#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
			 uv_info.inst_calls_list[0],
			 uv_info.inst_calls_list[1],
			 uv_info.inst_calls_list[2],
			 uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
				     struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.guest_cpu_stor_len);
}

static struct kobj_attribute uv_query_dump_cpu_len_attr =
	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
					       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
					  struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			 uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			 uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
					     struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
					struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	&uv_query_supp_se_hdr_ver_attr.attr,
	&uv_query_supp_se_hdr_pcf_attr.attr,
	&uv_query_dump_storage_state_len_attr.attr,
	&uv_query_dump_finalize_len_attr.attr,
	&uv_query_dump_cpu_len_attr.attr,
	&uv_query_supp_att_req_hdr_ver_attr.attr,
	&uv_query_supp_att_pflags_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

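/*
 * Each attribute above becomes a read-only file in /sys/firmware/uv/query/,
 * e.g. (the value shown is illustrative only):
 *   $ cat /sys/firmware/uv/query/max_guests
 *   250
 */
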
static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
				     struct kobj_attribute *attr, char *page)
{
	int val = 0;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
	val = prot_virt_guest;
#endif
	return scnprintf(page, PAGE_SIZE, "%d\n", val);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
				    struct kobj_attribute *attr, char *page)
{
	int val = 0;

#if IS_ENABLED(CONFIG_KVM)
	val = prot_virt_host;
#endif

	return scnprintf(page, PAGE_SIZE, "%d\n", val);
}

static struct kobj_attribute uv_prot_virt_guest =
	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
	&uv_prot_virt_guest.attr,
	&uv_prot_virt_host.attr,
	NULL,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

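/*
 * Create the sysfs hierarchy: /sys/firmware/uv with the prot_virt_guest and
 * prot_virt_host files, and /sys/firmware/uv/query with the attribute group
 * defined above. Facility 158 indicates that the Ultravisor call facility
 * is installed, so without it there is nothing to expose.
 */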
static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
	if (rc)
		goto out_kobj;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset) {
		rc = -ENOMEM;
		goto out_ind_files;
	}

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_ind_files:
	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
#endif