// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2024
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/pagewalk.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
int __bootdata_preserved(prot_virt_guest);
EXPORT_SYMBOL(prot_virt_guest);

/*
 * uv_info contains both host and guest information but it's currently only
 * expected to be used within modules if it's the KVM module or for
 * any PV guest module.
 *
 * The kernel itself will write these values once in uv_query_info()
 * and then make some of them readable via a sysfs interface.
 */
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);

int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);

static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}

void __init setup_uv(void)
{
	void *uv_stor_base;

	if (!is_prot_virt_host())
		return;

	uv_stor_base = memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(uv_pin_shared);

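/*
 * Example (hypothetical caller, sketch only): a host component that needs a
 * shared guest page to stay shared for the duration of an operation could
 * pin it first and bail out if the page is not shared or the UVC fails:
 *
 *	rc = uv_pin_shared(folio_to_phys(folio));
 *	if (rc)
 *		return rc;
 */
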
/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb)) {
		/*
		 * Older firmware uses 107/d as an indication of a non-secure
		 * page. Let us emulate the newer variant (no-op).
		 */
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}

/*
 * The caller must already hold a reference to the folio.
 */
int uv_destroy_folio(struct folio *folio)
{
	int rc;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	folio_get(folio);
	rc = uv_destroy(folio_to_phys(folio));
	if (!rc)
		clear_bit(PG_arch_1, &folio->flags);
	folio_put(folio);
	return rc;
}
EXPORT_SYMBOL(uv_destroy_folio);

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_destroy_pte(pte_t pte)
{
	VM_WARN_ON(!pte_present(pte));
	return uv_destroy_folio(pfn_folio(pte_pfn(pte)));
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(uv_convert_from_secure);

/*
 * The caller must already hold a reference to the folio.
 */
int uv_convert_from_secure_folio(struct folio *folio)
{
	int rc;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	folio_get(folio);
	rc = uv_convert_from_secure(folio_to_phys(folio));
	if (!rc)
		clear_bit(PG_arch_1, &folio->flags);
	folio_put(folio);
	return rc;
}
EXPORT_SYMBOL_GPL(uv_convert_from_secure_folio);

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_convert_from_secure_pte(pte_t pte)
{
	VM_WARN_ON(!pte_present(pte));
	return uv_convert_from_secure_folio(pfn_folio(pte_pfn(pte)));
}

/*
 * Calculate the expected ref_count for a folio that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * folio can not be a large folio, for example.
 */
static int expected_folio_refs(struct folio *folio)
{
	int res;

	res = folio_mapcount(folio);
	if (folio_test_swapcache(folio)) {
		res++;
	} else if (folio_mapping(folio)) {
		res++;
		if (folio->private)
			res++;
	}
	return res;
}

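/*
 * Worked example (illustration only): a small folio mapped into exactly one
 * address space and present in the swap cache has folio_mapcount() == 1 plus
 * one swap-cache reference, so expected_folio_refs() returns 2.
 * make_folio_secure() below then adds 1 for the extra reference its caller
 * is required to hold before the refcount can be frozen.
 */
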
/**
 * make_folio_secure() - make a folio secure
 * @folio: the folio to make secure
 * @uvcb: the uvcb that describes the UVC to be used
 *
 * The folio @folio will be made secure if possible, @uvcb will be passed
 * as-is to the UVC.
 *
 * Return: 0 on success;
 *         -EBUSY if the folio is in writeback or has too many references;
 *         -E2BIG if the folio is large;
 *         -EAGAIN if the UVC needs to be attempted again;
 *         -ENXIO if the address is not mapped;
 *         -EINVAL if the UVC failed for other reasons.
 *
 * Context: The caller must hold exactly one extra reference on the folio
 *          (it's the same logic as split_folio())
 */
int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
{
	int expected, cc = 0;

	if (folio_test_large(folio))
		return -E2BIG;
	if (folio_test_writeback(folio))
		return -EBUSY;
	expected = expected_folio_refs(folio) + 1;
	if (!folio_ref_freeze(folio, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &folio->flags);
	/*
	 * If the UVC does not succeed or fail immediately, we don't want to
	 * loop for long, or we might get stall notifications.
	 * On the other hand, this is a complex scenario and we are holding a lot of
	 * locks, so we can't easily sleep and reschedule. We try only once,
	 * and if the UVC returned busy or partial completion, we return
	 * -EAGAIN and we let the callers deal with it.
	 */
	cc = __uv_call(0, (u64)uvcb);
	folio_ref_unfreeze(folio, expected);
	/*
	 * Return -ENXIO if the folio was not mapped, -EINVAL for other errors.
	 * If busy or partially completed, return -EAGAIN.
	 */
	if (cc == UVC_CC_OK)
		return 0;
	else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
		return -EAGAIN;
	return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}
EXPORT_SYMBOL_GPL(make_folio_secure);

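/*
 * Sketch of the expected calling pattern (hypothetical and simplified; real
 * callers such as gmap_make_secure() also have their own locking to manage).
 * Since the UVC is attempted exactly once above, handling -EAGAIN is the
 * caller's job:
 *
 *	rc = make_folio_secure(folio, uvcb);
 *	if (rc == -EAGAIN) {
 *		(drop locks, cond_resched(), look the folio up again, retry)
 *	}
 */
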
/*
 * To be called with the folio locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the folio concurrently. Having two
 * parallel calls to arch_make_folio_accessible() is fine, as the UV calls
 * will become a no-op if the folio is already exported.
 */
int arch_make_folio_accessible(struct folio *folio)
{
	int rc = 0;

	/* See gmap_make_secure(): large folios cannot be secure */
	if (unlikely(folio_test_large(folio)))
		return 0;

	/*
	 * PG_arch_1 is used in 2 places:
	 * 1. for storage keys of hugetlb folios and KVM
	 * 2. As an indication that this small folio might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never large folios, both variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &folio->flags))
		return 0;

	rc = uv_pin_shared(folio_to_phys(folio));
	if (!rc) {
		clear_bit(PG_arch_1, &folio->flags);
		return 0;
	}

	rc = uv_convert_from_secure(folio_to_phys(folio));
	if (!rc) {
		clear_bit(PG_arch_1, &folio->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_folio_accessible);

static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
			  uv_info.inst_calls_list[0],
			  uv_info.inst_calls_list[1],
			  uv_info.inst_calls_list[2],
			  uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
	__ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
	__ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
}

static struct kobj_attribute uv_query_dump_cpu_len_attr =
	__ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
					       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
	__ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
	__ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
					     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
	__ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
	__ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
						struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
}

static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
	__ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);

static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
}

static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
	__ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);

static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
}

static struct kobj_attribute uv_query_supp_secret_types_attr =
	__ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);

static ssize_t uv_query_max_secrets(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n",
			  uv_info.max_assoc_secrets + uv_info.max_retr_secrets);
}

static struct kobj_attribute uv_query_max_secrets_attr =
	__ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);

static ssize_t uv_query_max_retr_secrets(struct kobject *kobj,
					 struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_retr_secrets);
}

static struct kobj_attribute uv_query_max_retr_secrets_attr =
	__ATTR(max_retr_secrets, 0444, uv_query_max_retr_secrets, NULL);

static ssize_t uv_query_max_assoc_secrets(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%d\n", uv_info.max_assoc_secrets);
}

static struct kobj_attribute uv_query_max_assoc_secrets_attr =
	__ATTR(max_assoc_secrets, 0444, uv_query_max_assoc_secrets, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	&uv_query_supp_se_hdr_ver_attr.attr,
	&uv_query_supp_se_hdr_pcf_attr.attr,
	&uv_query_dump_storage_state_len_attr.attr,
	&uv_query_dump_finalize_len_attr.attr,
	&uv_query_dump_cpu_len_attr.attr,
	&uv_query_supp_att_req_hdr_ver_attr.attr,
	&uv_query_supp_att_pflags_attr.attr,
	&uv_query_supp_add_secret_req_ver_attr.attr,
	&uv_query_supp_add_secret_pcf_attr.attr,
	&uv_query_supp_secret_types_attr.attr,
	&uv_query_max_secrets_attr.attr,
	&uv_query_max_assoc_secrets_attr.attr,
	&uv_query_max_retr_secrets_attr.attr,
	NULL,
};

static inline struct uv_cb_query_keys uv_query_keys(void)
{
	struct uv_cb_query_keys uvcb = {
		.header.cmd = UVC_CMD_QUERY_KEYS,
		.header.len = sizeof(uvcb),
	};

	uv_call(0, (uint64_t)&uvcb);
	return uvcb;
}

static inline ssize_t emit_hash(struct uv_key_hash *hash, char *buf, int at)
{
	return sysfs_emit_at(buf, at, "%016llx%016llx%016llx%016llx\n",
			     hash->dword[0], hash->dword[1], hash->dword[2], hash->dword[3]);
}

static ssize_t uv_keys_host_key(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	struct uv_cb_query_keys uvcb = uv_query_keys();

	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_HK], buf, 0);
}

static struct kobj_attribute uv_keys_host_key_attr =
	__ATTR(host_key, 0444, uv_keys_host_key, NULL);

static ssize_t uv_keys_backup_host_key(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	struct uv_cb_query_keys uvcb = uv_query_keys();

	return emit_hash(&uvcb.key_hashes[UVC_QUERY_KEYS_IDX_BACK_HK], buf, 0);
}

static struct kobj_attribute uv_keys_backup_host_key_attr =
	__ATTR(backup_host_key, 0444, uv_keys_backup_host_key, NULL);

static ssize_t uv_keys_all(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct uv_cb_query_keys uvcb = uv_query_keys();
	ssize_t len = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(uvcb.key_hashes); i++)
		len += emit_hash(uvcb.key_hashes + i, buf, len);

	return len;
}

static struct kobj_attribute uv_keys_all_attr =
	__ATTR(all, 0444, uv_keys_all, NULL);

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static struct attribute *uv_keys_attrs[] = {
	&uv_keys_host_key_attr.attr,
	&uv_keys_backup_host_key_attr.attr,
	&uv_keys_all_attr.attr,
	NULL,
};

static struct attribute_group uv_keys_attr_group = {
	.attrs = uv_keys_attrs,
};

static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", prot_virt_guest);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", prot_virt_host);
}

static struct kobj_attribute uv_prot_virt_guest =
	__ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
	__ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
	&uv_prot_virt_guest.attr,
	&uv_prot_virt_host.attr,
	NULL,
};

static struct kset *uv_query_kset;
static struct kset *uv_keys_kset;
static struct kobject *uv_kobj;

static int __init uv_sysfs_dir_init(const struct attribute_group *grp,
				    struct kset **uv_dir_kset, const char *name)
{
	struct kset *kset;
	int rc;

	kset = kset_create_and_add(name, NULL, uv_kobj);
	if (!kset)
		return -ENOMEM;
	*uv_dir_kset = kset;

	rc = sysfs_create_group(&kset->kobj, grp);
	if (rc)
		kset_unregister(kset);
	return rc;
}

static int __init uv_sysfs_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
	if (rc)
		goto out_kobj;

	rc = uv_sysfs_dir_init(&uv_query_attr_group, &uv_query_kset, "query");
	if (rc)
		goto out_ind_files;

	/* Get installed key hashes if available, ignore any errors */
	if (test_bit_inv(BIT_UVC_CMD_QUERY_KEYS, uv_info.inst_calls_list))
		uv_sysfs_dir_init(&uv_keys_attr_group, &uv_keys_kset, "keys");

	return 0;

out_ind_files:
	sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_sysfs_init);

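/*
 * The resulting sysfs layout (derived from the registrations above, listed
 * here for illustration):
 *
 *	/sys/firmware/uv/prot_virt_guest
 *	/sys/firmware/uv/prot_virt_host
 *	/sys/firmware/uv/query/{facilities,max_cpus,max_guests,...}
 *	/sys/firmware/uv/keys/{host_key,backup_host_key,all}
 *
 * e.g. "cat /sys/firmware/uv/query/max_cpus" prints the maximum number of
 * CPUs a protected guest may have.
 */
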
/*
 * Find the secret with the secret_id in the provided list.
 *
 * Context: might sleep.
 */
static int find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN],
			       const struct uv_secret_list *list,
			       struct uv_secret_list_item_hdr *secret)
{
	u16 i;

	for (i = 0; i < list->total_num_secrets; i++) {
		if (memcmp(secret_id, list->secrets[i].id, UV_SECRET_ID_LEN) == 0) {
			*secret = list->secrets[i].hdr;
			return 0;
		}
	}
	return -ENOENT;
}

/*
 * Do the actual search for `uv_get_secret_metadata`.
 *
 * Context: might sleep.
 */
static int find_secret(const u8 secret_id[UV_SECRET_ID_LEN],
		       struct uv_secret_list *list,
		       struct uv_secret_list_item_hdr *secret)
{
	u16 start_idx = 0;
	u16 list_rc;
	int ret;

	do {
		uv_list_secrets(list, start_idx, &list_rc, NULL);
		if (list_rc != UVC_RC_EXECUTED && list_rc != UVC_RC_MORE_DATA) {
			if (list_rc == UVC_RC_INV_CMD)
				return -ENODEV;
			else
				return -EIO;
		}
		ret = find_secret_in_page(secret_id, list, secret);
		if (ret == 0)
			return ret;
		start_idx = list->next_secret_idx;
	} while (list_rc == UVC_RC_MORE_DATA && start_idx < list->next_secret_idx);

	return -ENOENT;
}

/**
 * uv_get_secret_metadata() - get secret metadata for a given secret id.
 * @secret_id: search pattern.
 * @secret: output data, containing the secret's metadata.
 *
 * Search for a secret with the given secret_id in the Ultravisor secret store.
 *
 * Context: might sleep.
 *
 * Return:
 * * %0	     - Found entry; secret->idx and secret->type are valid.
 * * %ENOENT - No entry found.
 * * %ENODEV - Not supported: UV not available or command not available.
 * * %EIO    - Other unexpected UV error.
 */
int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN],
			   struct uv_secret_list_item_hdr *secret)
{
	struct uv_secret_list *buf;
	int rc;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = find_secret(secret_id, buf, secret);
	kfree(buf);
	return rc;
}
EXPORT_SYMBOL_GPL(uv_get_secret_metadata);

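/*
 * Example (hypothetical caller, sketch only): look up a secret by its id;
 * on success, secret->idx and secret->type describe the stored entry:
 *
 *	struct uv_secret_list_item_hdr hdr;
 *
 *	rc = uv_get_secret_metadata(id, &hdr);
 *	if (rc)
 *		return rc;
 */
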
/**
 * uv_retrieve_secret() - get the secret value for the secret index.
 * @secret_idx: Secret index for which the secret should be retrieved.
 * @buf: Buffer to store retrieved secret.
 * @buf_size: Size of the buffer. The correct buffer size is reported as part
 *	      of the result from `uv_get_secret_metadata`.
 *
 * Calls the Retrieve Secret UVC and translates the UV return code into an
 * errno.
 *
 * Context: might sleep.
 *
 * Return:
 * * %0	     - Entry found; buffer contains a valid secret.
 * * %ENOENT - No entry found or secret at the index is non-retrievable.
 * * %ENODEV - Not supported: UV not available or command not available.
 * * %EINVAL - Buffer too small for content.
 * * %EIO    - Other unexpected UV error.
 */
int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size)
{
	struct uv_cb_retr_secr uvcb = {
		.header.len = sizeof(uvcb),
		.header.cmd = UVC_CMD_RETR_SECRET,
		.secret_idx = secret_idx,
		.buf_addr = (u64)buf,
		.buf_size = buf_size,
	};

	uv_call_sched(0, (u64)&uvcb);

	switch (uvcb.header.rc) {
	case UVC_RC_EXECUTED:
		return 0;
	case UVC_RC_INV_CMD:
		return -ENODEV;
	case UVC_RC_RETR_SECR_STORE_EMPTY:
	case UVC_RC_RETR_SECR_INV_SECRET:
	case UVC_RC_RETR_SECR_INV_IDX:
		return -ENOENT;
	case UVC_RC_RETR_SECR_BUF_SMALL:
		return -EINVAL;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(uv_retrieve_secret);
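
/*
 * Putting both calls together (hypothetical driver code, sketch only; the
 * buffer size here is made up for illustration): first resolve the secret
 * id to an index, then retrieve the secret value itself:
 *
 *	struct uv_secret_list_item_hdr hdr;
 *	u8 secret[64];
 *
 *	rc = uv_get_secret_metadata(id, &hdr);
 *	if (!rc)
 *		rc = uv_retrieve_secret(hdr.idx, secret, sizeof(secret));
 *
 * uv_retrieve_secret() returns -EINVAL if the buffer is smaller than the
 * stored secret, so the metadata should be used to size the buffer.
 */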