// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2024
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/pagewalk.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>
/* Stubs for the gmap functions normally provided by the KVM module. */
#if !IS_ENABLED(CONFIG_KVM)
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	return 0;
}

int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	return 0;
}
#endif
/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
int __bootdata_preserved(prot_virt_guest);
EXPORT_SYMBOL(prot_virt_guest);

/*
 * uv_info contains both host and guest information but it's currently only
 * expected to be used within modules if it's the KVM module or for
 * any PV guest module.
 *
 * The kernel itself will write these values once in uv_query_info()
 * and then make some of them readable via a sysfs interface.
 */
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);

int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);
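/*
 * Donate the base storage area to the Ultravisor with the Init UVC.
 * Returns 0 on success, -1 on failure.
 */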
static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}
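/*
 * Reserve memory for the Ultravisor base storage at boot and hand it over
 * via uv_init(). On any failure, protected virtualization support for the
 * host is disabled.
 */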
void __init setup_uv(void)
{
	void *uv_stor_base;

	if (!is_prot_virt_host())
		return;

	uv_stor_base = memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}
/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(uv_pin_shared);
/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting it.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb)) {
		/*
		 * Older firmware uses 107/d as an indication of a non secure
		 * page. Let us emulate the newer variant (no-op).
		 */
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}
/*
 * The caller must already hold a reference to the folio.
 */
int uv_destroy_folio(struct folio *folio)
{
	int rc;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	folio_get(folio);
	rc = uv_destroy(folio_to_phys(folio));
	if (!rc)
		clear_bit(PG_arch_1, &folio->flags);
	folio_put(folio);
	return rc;
}

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_destroy_pte(pte_t pte)
{
	VM_WARN_ON(!pte_present(pte));
	return uv_destroy_folio(pfn_folio(pte_pfn(pte)));
}
/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
static int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * The caller must already hold a reference to the folio.
 */
static int uv_convert_from_secure_folio(struct folio *folio)
{
	int rc;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	folio_get(folio);
	rc = uv_convert_from_secure(folio_to_phys(folio));
	if (!rc)
		clear_bit(PG_arch_1, &folio->flags);
	folio_put(folio);
	return rc;
}

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_convert_from_secure_pte(pte_t pte)
{
	VM_WARN_ON(!pte_present(pte));
	return uv_convert_from_secure_folio(pfn_folio(pte_pfn(pte)));
}
/*
 * Calculate the expected ref_count for a folio that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * folio can not be a large folio, for example.
 */
static int expected_folio_refs(struct folio *folio)
{
	int res;

	res = folio_mapcount(folio);
	if (folio_test_swapcache(folio)) {
		res++;
	} else if (folio_mapping(folio)) {
		res++;
		if (folio->private)
			res++;
	}
	return res;
}
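/*
 * Freeze the folio refcount at its expected safe value, issue the import
 * UVC described by @uvcb exactly once, and translate the condition code
 * into an errno: 0 on success, -EAGAIN if the caller should retry (busy,
 * partial completion, or writeback), -EBUSY if the refcount could not be
 * frozen, -ENXIO if the page was not mapped.
 */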
static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
{
	int expected, cc = 0;

	if (folio_test_writeback(folio))
		return -EAGAIN;
	expected = expected_folio_refs(folio);
	if (!folio_ref_freeze(folio, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &folio->flags);
	/*
	 * If the UVC does not succeed or fail immediately, we don't want to
	 * loop for long, or we might get stall notifications.
	 * On the other hand, this is a complex scenario and we are holding a lot of
	 * locks, so we can't easily sleep and reschedule. We try only once,
	 * and if the UVC returned busy or partial completion, we return
	 * -EAGAIN and we let the callers deal with it.
	 */
	cc = __uv_call(0, (u64)uvcb);
	folio_ref_unfreeze(folio, expected);
	/*
	 * Return -ENXIO if the folio was not mapped, -EINVAL for other errors.
	 * If busy or partially completed, return -EAGAIN.
	 */
	if (cc == UVC_CC_OK)
		return 0;
	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
		return -EAGAIN;
	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}
/**
 * should_export_before_import - Determine whether an export is needed
 * before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although considered as one, the Unpin Page UVC is not an actual import,
 * so it is not affected.
 *
 * No export is needed also when there is only one protected VM, because the
 * page cannot belong to the wrong VM in that case (there is no "other VM"
 * it can belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
	/*
	 * The misc feature indicates, among other things, that importing a
	 * shared page from a different protected VM will automatically also
	 * transfer its ownership.
	 */
	if (uv_has_feature(BIT_UV_FEAT_MISC))
		return false;
	if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
		return false;
	return atomic_read(&mm->context.protected_count) > 1;
}
/*
 * Drain LRU caches: the local one on first invocation and the ones of all
 * CPUs on successive invocations. Returns "true" on the first invocation.
 */
static bool drain_lru(bool *drain_lru_called)
{
	/*
	 * If we have tried a local drain and the folio refcount
	 * still does not match our expected safe value, try with a
	 * system wide drain. This is needed if the pagevecs holding
	 * the page are on a different CPU.
	 */
	if (*drain_lru_called) {
		lru_add_drain_all();
		/* We give up here, don't retry immediately. */
		return false;
	}
	/*
	 * We are here if the folio refcount does not match the
	 * expected safe value. The main culprits are usually
	 * pagevecs. With lru_add_drain() we drain the pagevecs
	 * on the local CPU so that hopefully the refcount will
	 * reach the expected safe value.
	 */
	lru_add_drain();
	*drain_lru_called = true;
	/* The caller should try again immediately */
	return true;
}
/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it's brought in the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool drain_lru_called = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct folio *folio;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest is thus never reaching secure mode. If
	 * userspace plays dirty tricks with mapping huge pages later
	 * on this will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	if (!ptep)
		goto out;
	if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
		folio = page_folio(pte_page(*ptep));
		rc = -EAGAIN;
		if (folio_test_large(folio)) {
			rc = -E2BIG;
		} else if (folio_trylock(folio)) {
			if (should_export_before_import(uvcb, gmap->mm))
				uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
			rc = make_folio_secure(folio, uvcb);
			folio_unlock(folio);
		}
		/*
		 * Once we drop the PTL, the folio may get unmapped and
		 * freed immediately. We need a temporary reference.
		 */
		if (rc == -EAGAIN || rc == -E2BIG)
			folio_get(folio);
	}
	pte_unmap_unlock(ptep, ptelock);
out:
	mmap_read_unlock(gmap->mm);

	switch (rc) {
	case -E2BIG:
		folio_lock(folio);
		rc = split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);
		switch (rc) {
		case 0:
			/* Splitting succeeded, try again immediately. */
			goto again;
		case -EAGAIN:
			/* Additional folio references. */
			if (drain_lru(&drain_lru_called))
				goto again;
			return -EAGAIN;
		case -EBUSY:
			/* Unexpected race. */
			return -EAGAIN;
		}
		WARN_ON_ONCE(1);
		return -ENXIO;
	case -EAGAIN:
		/*
		 * If we are here because the UVC returned busy or partial
		 * completion, this is just a useless check, but it is safe.
		 */
		folio_wait_writeback(folio);
		folio_put(folio);
		return -EAGAIN;
	case -EBUSY:
		/* Additional folio references. */
		if (drain_lru(&drain_lru_called))
			goto again;
		return -EAGAIN;
	case -ENXIO:
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);
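/*
 * Convert a page of the given guest to secure by issuing the Convert To
 * Secure Storage UVC through gmap_make_secure().
 */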
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
/**
 * gmap_destroy_page - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
	struct vm_area_struct *vma;
	struct folio_walk fw;
	unsigned long uaddr;
	struct folio *folio;
	int rc;

	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = vma_lookup(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Huge pages should not be able to become secure
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = 0;
	folio = folio_walk_start(&fw, vma, uaddr, 0);
	if (!folio)
		goto out;
	/*
	 * See gmap_make_secure(): large folios cannot be secure. Small
	 * folio implies FW_LEVEL_PTE.
	 */
	if (folio_test_large(folio) || !pte_write(fw.pte))
		goto out_walk;
	rc = uv_destroy_folio(folio);
	/*
	 * Fault handlers can race; it is possible that two CPUs will fault
	 * on the same secure page. One CPU can destroy the page, reboot,
	 * re-enter secure mode and import it, while the second CPU was
	 * stuck at the beginning of the handler. At some point the second
	 * CPU will be able to progress, and it will not be able to destroy
	 * the page. In that case we do not want to terminate the process,
	 * we instead try to export the page.
	 */
	if (rc)
		rc = uv_convert_from_secure_folio(folio);
out_walk:
	folio_walk_end(&fw, vma);
out:
	mmap_read_unlock(gmap->mm);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_destroy_page);
/*
 * To be called with the folio locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the folio concurrently. Having 2
 * parallel arch_make_folio_accessible is fine, as the UV calls will become a
 * no-op if the folio is already exported.
 */
int arch_make_folio_accessible(struct folio *folio)
{
	int rc = 0;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	/*
	 * PG_arch_1 is used in 2 places:
	 * 1. for storage keys of hugetlb folios and KVM
	 * 2. As an indication that this small folio might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never large folios, both variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &folio->flags))
		return 0;

	rc = uv_pin_shared(folio_to_phys(folio));
	if (!rc) {
		clear_bit(PG_arch_1, &folio->flags);
		return 0;
	}

	rc = uv_convert_from_secure(folio_to_phys(folio));
	if (!rc) {
		clear_bit(PG_arch_1, &folio->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_folio_accessible);
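/*
 * The attributes below expose the Ultravisor query information under
 * /sys/firmware/uv/query/.
 */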
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
			  uv_info.inst_calls_list[0],
			  uv_info.inst_calls_list[1],
			  uv_info.inst_calls_list[2],
			  uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
}

/* Note: the resulting sysfs file name keeps the "uv_query_" prefix. */
static struct kobj_attribute uv_query_dump_cpu_len_attr =
	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
					       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);
static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
					     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);
static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
						struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
}

static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
	__ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);

static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
}

static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
	__ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);

static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
}

static struct kobj_attribute uv_query_supp_secret_types_attr =
	__ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);

static ssize_t uv_query_max_secrets(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n",
			  uv_info.max_assoc_secrets + uv_info.max_retr_secrets);
}

static struct kobj_attribute uv_query_max_secrets_attr =
	__ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);

static ssize_t uv_query_max_retr_secrets(struct kobject *kobj,
					 struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_retr_secrets);
}

static struct kobj_attribute uv_query_max_retr_secrets_attr =
	__ATTR(max_retr_secrets, 0444, uv_query_max_retr_secrets, NULL);

static ssize_t uv_query_max_assoc_secrets(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_assoc_secrets);
}

static struct kobj_attribute uv_query_max_assoc_secrets_attr =
	__ATTR(max_assoc_secrets, 0444, uv_query_max_assoc_secrets, NULL);
static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	&uv_query_supp_se_hdr_ver_attr.attr,
	&uv_query_supp_se_hdr_pcf_attr.attr,
	&uv_query_dump_storage_state_len_attr.attr,
	&uv_query_dump_finalize_len_attr.attr,
	&uv_query_dump_cpu_len_attr.attr,
	&uv_query_supp_att_req_hdr_ver_attr.attr,
	&uv_query_supp_att_pflags_attr.attr,
	&uv_query_supp_add_secret_req_ver_attr.attr,
	&uv_query_supp_add_secret_pcf_attr.attr,
	&uv_query_supp_secret_types_attr.attr,
	&uv_query_max_secrets_attr.attr,
	&uv_query_max_assoc_secrets_attr.attr,
	&uv_query_max_retr_secrets_attr.attr,
	NULL,
};
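/*
 * Issue the Query Keys UVC and return the control block containing the
 * key hashes; errors are ignored and leave the hashes zeroed.
 */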
static inline struct uv_cb_query_keys uv_query_keys(void)
{
	struct uv_cb_query_keys uvcb = {
		.header.cmd = UVC_CMD_QUERY_KEYS,
		.header.len = sizeof(uvcb),
	};

	uv_call(0, (uint64_t)&uvcb);
	return uvcb;
}

static inline ssize_t emit_hash(struct uv_key_hash *hash, char *buf, int at)
{
	return sysfs_emit_at(buf, at, "%016llx%016llx%016llx%016llx\n",
			     hash->dword[0], hash->dword[1], hash->dword[2], hash->dword[3]);
}
static ssize_t uv_keys_host_key(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct uv_cb_query_keys uvcb = uv_query_keys();

	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_HK], buf, 0);
}

static struct kobj_attribute uv_keys_host_key_attr =
	__ATTR(host_key, 0444, uv_keys_host_key, NULL);

static ssize_t uv_keys_backup_host_key(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	struct uv_cb_query_keys uvcb = uv_query_keys();

	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_BACK_HK], buf, 0);
}

static struct kobj_attribute uv_keys_backup_host_key_attr =
	__ATTR(backup_host_key, 0444, uv_keys_backup_host_key, NULL);

static ssize_t uv_keys_all(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct uv_cb_query_keys uvcb = uv_query_keys();
	ssize_t len = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(uvcb.key_hashes); i++)
		len += emit_hash(uvcb.key_hashes + i, buf, len);

	return len;
}

static struct kobj_attribute uv_keys_all_attr =
	__ATTR(all, 0444, uv_keys_all, NULL);
static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static struct attribute *uv_keys_attrs[] = {
	&uv_keys_host_key_attr.attr,
	&uv_keys_backup_host_key_attr.attr,
	&uv_keys_all_attr.attr,
	NULL,
};

static struct attribute_group uv_keys_attr_group = {
	.attrs = uv_keys_attrs,
};
static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", prot_virt_guest);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", prot_virt_host);
}

static struct kobj_attribute uv_prot_virt_guest =
	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
	&uv_prot_virt_guest.attr,
	&uv_prot_virt_host.attr,
	NULL,
};
static struct kset *uv_query_kset;
static struct kset *uv_keys_kset;
static struct kobject *uv_kobj;
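/*
 * Create a kset directory under /sys/firmware/uv and populate it with the
 * given attribute group.
 */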
static int __init uv_sysfs_dir_init(const struct attribute_group *grp,
				    struct kset **uv_dir_kset, const char *name)
{
	struct kset *kset;
	int rc;

	kset = kset_create_and_add(name, NULL, uv_kobj);
	if (!kset)
		return -ENOMEM;
	*uv_dir_kset = kset;

	rc = sysfs_create_group(&kset->kobj, grp);
	if (rc)
		kset_unregister(kset);
	return rc;
}
static int __init uv_sysfs_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
	if (rc)
		goto out_kobj;

	rc = uv_sysfs_dir_init(&uv_query_attr_group, &uv_query_kset, "query");
	if (rc)
		goto out_ind_files;

	/* Get installed key hashes if available, ignore any errors */
	if (test_bit_inv(BIT_UVC_CMD_QUERY_KEYS, uv_info.inst_calls_list))
		uv_sysfs_dir_init(&uv_keys_attr_group, &uv_keys_kset, "keys");

	return 0;

out_ind_files:
	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_sysfs_init);
/*
 * Find the secret with the given secret_id in the provided list.
 *
 * Context: might sleep.
 */
static int find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN],
			       const struct uv_secret_list *list,
			       struct uv_secret_list_item_hdr *secret)
{
	u16 i;

	for (i = 0; i < list->total_num_secrets; i++) {
		if (memcmp(secret_id, list->secrets[i].id, UV_SECRET_ID_LEN) == 0) {
			*secret = list->secrets[i].hdr;
			return 0;
		}
	}
	return -ENOENT;
}
/*
 * Do the actual search for `uv_get_secret_metadata`.
 *
 * Context: might sleep.
 */
static int find_secret(const u8 secret_id[UV_SECRET_ID_LEN],
		       struct uv_secret_list *list,
		       struct uv_secret_list_item_hdr *secret)
{
	u16 start_idx = 0;
	u16 list_rc;
	int ret;

	do {
		uv_list_secrets(list, start_idx, &list_rc, NULL);
		if (list_rc != UVC_RC_EXECUTED && list_rc != UVC_RC_MORE_DATA) {
			if (list_rc == UVC_RC_INV_CMD)
				return -ENODEV;
			else
				return -EIO;
		}
		ret = find_secret_in_page(secret_id, list, secret);
		if (ret == 0)
			return ret;
		start_idx = list->next_secret_idx;
	} while (list_rc == UVC_RC_MORE_DATA && start_idx < list->next_secret_idx);

	return -ENOENT;
}
/**
 * uv_get_secret_metadata() - get secret metadata for a given secret id.
 * @secret_id: search pattern.
 * @secret: output data, containing the secret's metadata.
 *
 * Search for a secret with the given secret_id in the Ultravisor secret store.
 *
 * Context: might sleep.
 *
 * Return:
 * * %0:       - Found entry; secret->idx and secret->type are valid.
 * * %ENOENT:  - No entry found.
 * * %ENODEV:  - Not supported: UV not available or command not available.
 * * %ENOMEM:  - Allocation of the search buffer failed.
 * * %EIO:     - Other unexpected UV error.
 */
int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN],
			   struct uv_secret_list_item_hdr *secret)
{
	struct uv_secret_list *buf;
	int rc;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = find_secret(secret_id, buf, secret);
	kfree(buf);
	return rc;
}
EXPORT_SYMBOL_GPL(uv_get_secret_metadata);
/**
 * uv_retrieve_secret() - get the secret value for the secret index.
 * @secret_idx: Secret index for which the secret should be retrieved.
 * @buf: Buffer to store retrieved secret.
 * @buf_size: Size of the buffer. The correct buffer size is reported as part of
 *	the result from `uv_get_secret_metadata`.
 *
 * Calls the Retrieve Secret UVC and translates the UV return code into an errno.
 *
 * Context: might sleep.
 *
 * Return:
 * * %0:       - Entry found; buffer contains a valid secret.
 * * %ENOENT:  - No entry found or secret at the index is non-retrievable.
 * * %ENODEV:  - Not supported: UV not available or command not available.
 * * %EINVAL:  - Buffer too small for content.
 * * %EIO:     - Other unexpected UV error.
 */
int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size)
{
	struct uv_cb_retr_secr uvcb = {
		.header.len = sizeof(uvcb),
		.header.cmd = UVC_CMD_RETR_SECRET,
		.secret_idx = secret_idx,
		.buf_addr = (u64)buf,
		.buf_size = buf_size,
	};

	uv_call_sched(0, (u64)&uvcb);

	switch (uvcb.header.rc) {
	case UVC_RC_EXECUTED:
		return 0;
	case UVC_RC_INV_CMD:
		return -ENODEV;
	case UVC_RC_RETR_SECR_STORE_EMPTY:
	case UVC_RC_RETR_SECR_INV_SECRET:
	case UVC_RC_RETR_SECR_INV_IDX:
		return -ENOENT;
	case UVC_RC_RETR_SECR_BUF_SMALL:
		return -EINVAL;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(uv_retrieve_secret);
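/*
 * A minimal usage sketch for the two secret helpers above (hypothetical
 * caller, error handling trimmed; the hdr field names used here are
 * illustrative - use the ones defined for struct uv_secret_list_item_hdr
 * in asm/uv.h):
 *
 *	struct uv_secret_list_item_hdr hdr;
 *	u8 *data;
 *
 *	if (!uv_get_secret_metadata(id, &hdr)) {
 *		data = kzalloc(hdr.length, GFP_KERNEL);
 *		if (data && !uv_retrieve_secret(hdr.index, data, hdr.length))
 *			consume_secret(data, hdr.length); // hypothetical consumer
 *		kfree(data);
 *	}
 */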