1 // SPDX-License-Identifier: GPL-2.0-only
3 * efi.c - EFI subsystem
9 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10 * allowing the efivarfs to be mounted or the efivars module to be loaded.
11 * The existence of /sys/firmware/efi may also be used by userspace to
12 * determine that the system supports EFI.
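 *
 * Purely as an illustrative sketch (not part of this file): a userspace
 * program could rely on that directory to detect EFI support using nothing
 * but standard libc calls, e.g.
 *
 *	#include <stdbool.h>
 *	#include <sys/stat.h>
 *
 *	static bool system_has_efi(void)
 *	{
 *		struct stat st;
 *
 *		return stat("/sys/firmware/efi", &st) == 0;
 *	}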
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 #include <linux/kobject.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/efi.h>
24 #include <linux/initrd.h>
26 #include <linux/kexec.h>
27 #include <linux/platform_device.h>
28 #include <linux/random.h>
29 #include <linux/reboot.h>
30 #include <linux/slab.h>
31 #include <linux/acpi.h>
32 #include <linux/ucs2_string.h>
33 #include <linux/memblock.h>
34 #include <linux/security.h>
35 #include <linux/notifier.h>
37 #include <asm/early_ioremap.h>
39 struct efi __read_mostly efi = {
40 .runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
41 .acpi = EFI_INVALID_TABLE_ADDR,
42 .acpi20 = EFI_INVALID_TABLE_ADDR,
43 .smbios = EFI_INVALID_TABLE_ADDR,
44 .smbios3 = EFI_INVALID_TABLE_ADDR,
45 .esrt = EFI_INVALID_TABLE_ADDR,
46 .tpm_log = EFI_INVALID_TABLE_ADDR,
47 .tpm_final_log = EFI_INVALID_TABLE_ADDR,
48 #ifdef CONFIG_LOAD_UEFI_KEYS
49 .mokvar_table = EFI_INVALID_TABLE_ADDR,
51 #ifdef CONFIG_EFI_COCO_SECRET
52 .coco_secret = EFI_INVALID_TABLE_ADDR,
54 #ifdef CONFIG_UNACCEPTED_MEMORY
55 .unaccepted = EFI_INVALID_TABLE_ADDR,
60 unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
61 static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
62 static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
63 static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;
65 extern unsigned long screen_info_table;
67 struct mm_struct efi_mm = {
68 .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
69 .mm_users = ATOMIC_INIT(2),
70 .mm_count = ATOMIC_INIT(1),
71 .write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq),
72 MMAP_LOCK_INITIALIZER(efi_mm)
73 .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
74 .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
75 .cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
78 struct workqueue_struct *efi_rts_wq;
80 static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
81 static int __init setup_noefi(char *arg)
83 disable_runtime = true;
86 early_param("noefi", setup_noefi);
88 bool efi_runtime_disabled(void)
90 return disable_runtime;
93 bool __pure __efi_soft_reserve_enabled(void)
95 return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
98 static int __init parse_efi_cmdline(char *str)
101 pr_warn("need at least one option\n");
105 if (parse_option_str(str, "debug"))
106 set_bit(EFI_DBG, &efi.flags);
108 if (parse_option_str(str, "noruntime"))
109 disable_runtime = true;
111 if (parse_option_str(str, "runtime"))
112 disable_runtime = false;
114 if (parse_option_str(str, "nosoftreserve"))
115 set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
119 early_param("efi", parse_efi_cmdline);
121 struct kobject *efi_kobj;
124 * Let's not leave out systab information that snuck into
126 * Note: do not add more fields to the systab sysfs file, as that would break
127 * the sysfs one-value-per-file rule!
129 static ssize_t systab_show(struct kobject *kobj,
130 struct kobj_attribute *attr, char *buf)
137 if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
138 str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
139 if (efi.acpi != EFI_INVALID_TABLE_ADDR)
140 str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
142 * If both SMBIOS and SMBIOS3 entry points are implemented, the
143 * SMBIOS3 entry point shall be preferred, so we list it first to
144 * let applications stop parsing after the first match.
146 if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
147 str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
148 if (efi.smbios != EFI_INVALID_TABLE_ADDR)
149 str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
151 if (IS_ENABLED(CONFIG_X86))
152 str = efi_systab_show_arch(str);
157 static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
159 static ssize_t fw_platform_size_show(struct kobject *kobj,
160 struct kobj_attribute *attr, char *buf)
162 return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
165 extern __weak struct kobj_attribute efi_attr_fw_vendor;
166 extern __weak struct kobj_attribute efi_attr_runtime;
167 extern __weak struct kobj_attribute efi_attr_config_table;
168 static struct kobj_attribute efi_attr_fw_platform_size =
169 __ATTR_RO(fw_platform_size);
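/*
 * Illustrative sketch (hypothetical userspace helper, not part of this
 * file): the attribute above lets a process discover the firmware word
 * size by reading a single decimal value:
 *
 *	FILE *f = fopen("/sys/firmware/efi/fw_platform_size", "r");
 *	int bits = 0;
 *
 *	if (f) {
 *		if (fscanf(f, "%d", &bits) == 1)
 *			printf("firmware is %d-bit\n", bits);
 *		fclose(f);
 *	}
 */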
171 static struct attribute *efi_subsys_attrs[] = {
172 &efi_attr_systab.attr,
173 &efi_attr_fw_platform_size.attr,
174 &efi_attr_fw_vendor.attr,
175 &efi_attr_runtime.attr,
176 &efi_attr_config_table.attr,
180 umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
186 static const struct attribute_group efi_subsys_attr_group = {
187 .attrs = efi_subsys_attrs,
188 .is_visible = efi_attr_is_visible,
191 struct blocking_notifier_head efivar_ops_nh;
192 EXPORT_SYMBOL_GPL(efivar_ops_nh);
194 static struct efivars generic_efivars;
195 static struct efivar_operations generic_ops;
197 static bool generic_ops_supported(void)
199 unsigned long name_size;
204 name_size = sizeof(name);
206 if (!efi.get_next_variable)
208 status = efi.get_next_variable(&name_size, &name, &guid);
209 if (status == EFI_UNSUPPORTED)
215 static int generic_ops_register(void)
217 if (!generic_ops_supported())
220 generic_ops.get_variable = efi.get_variable;
221 generic_ops.get_next_variable = efi.get_next_variable;
222 generic_ops.query_variable_store = efi_query_variable_store;
223 generic_ops.query_variable_info = efi.query_variable_info;
225 if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
226 generic_ops.set_variable = efi.set_variable;
227 generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
229 return efivars_register(&generic_efivars, &generic_ops);
232 static void generic_ops_unregister(void)
234 if (!generic_ops.get_variable)
237 efivars_unregister(&generic_efivars);
240 void efivars_generic_ops_register(void)
242 generic_ops_register();
244 EXPORT_SYMBOL_GPL(efivars_generic_ops_register);
246 void efivars_generic_ops_unregister(void)
248 generic_ops_unregister();
250 EXPORT_SYMBOL_GPL(efivars_generic_ops_unregister);
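/*
 * Illustrative sketch (hypothetical driver, not part of this file): a
 * platform that provides its own variable store could plug into the same
 * efivars infrastructure much like the generic ops above. All "my_*"
 * names below are made up.
 *
 *	static struct efivars my_efivars;
 *	static struct efivar_operations my_ops = {
 *		.get_variable		= my_get_variable,
 *		.get_next_variable	= my_get_next_variable,
 *		.set_variable		= my_set_variable,
 *	};
 *
 *	static int __init my_backend_init(void)
 *	{
 *		return efivars_register(&my_efivars, &my_ops);
 *	}
 */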
252 #ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
253 #define EFIVAR_SSDT_NAME_MAX 16UL
254 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
255 static int __init efivar_ssdt_setup(char *str)
257 int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
262 if (strlen(str) < sizeof(efivar_ssdt))
263 memcpy(efivar_ssdt, str, strlen(str));
265 pr_warn("efivar_ssdt: name too long: %s\n", str);
268 __setup("efivar_ssdt=", efivar_ssdt_setup);
270 static __init int efivar_ssdt_load(void)
272 unsigned long name_size = 256;
273 efi_char16_t *name = NULL;
281 name = kzalloc(name_size, GFP_KERNEL);
286 char utf8_name[EFIVAR_SSDT_NAME_MAX];
287 unsigned long data_size = 0;
291 status = efi.get_next_variable(&name_size, name, &guid);
292 if (status == EFI_NOT_FOUND) {
294 } else if (status == EFI_BUFFER_TOO_SMALL) {
295 efi_char16_t *name_tmp =
296 krealloc(name, name_size, GFP_KERNEL);
305 limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
306 ucs2_as_utf8(utf8_name, name, limit - 1);
307 if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
310 pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);
312 status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
313 if (status != EFI_BUFFER_TOO_SMALL || !data_size) {
318 data = kmalloc(data_size, GFP_KERNEL);
324 status = efi.get_variable(name, &guid, NULL, &data_size, data);
325 if (status == EFI_SUCCESS) {
326 acpi_status acpi_ret = acpi_load_table(data, NULL);
327 if (ACPI_FAILURE(acpi_ret)) {
328 pr_err("efivar_ssdt: failed to load table: %u\n",
332 * The @data will be in use by the ACPI engine,
338 pr_err("efivar_ssdt: failed to get var data: 0x%lx\n", status);
347 static inline int efivar_ssdt_load(void) { return 0; }
350 #ifdef CONFIG_DEBUG_FS
352 #define EFI_DEBUGFS_MAX_BLOBS 32
354 static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
356 static void __init efi_debugfs_init(void)
358 struct dentry *efi_debugfs;
359 efi_memory_desc_t *md;
361 int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
364 efi_debugfs = debugfs_create_dir("efi", NULL);
365 if (IS_ERR(efi_debugfs))
368 for_each_efi_memory_desc(md) {
370 case EFI_BOOT_SERVICES_CODE:
371 snprintf(name, sizeof(name), "boot_services_code%d",
372 type_count[md->type]++);
374 case EFI_BOOT_SERVICES_DATA:
375 snprintf(name, sizeof(name), "boot_services_data%d",
376 type_count[md->type]++);
382 if (i >= EFI_DEBUGFS_MAX_BLOBS) {
383 pr_warn("More then %d EFI boot service segments, only showing first %d in debugfs\n",
384 EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
388 debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
389 debugfs_blob[i].data = memremap(md->phys_addr,
390 debugfs_blob[i].size,
392 if (!debugfs_blob[i].data)
395 debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
400 static inline void efi_debugfs_init(void) {}
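/*
 * With CONFIG_DEBUG_FS enabled and the boot services regions preserved,
 * the function above exposes those regions as read-only blobs, e.g.
 * /sys/kernel/debug/efi/boot_services_code0 and
 * /sys/kernel/debug/efi/boot_services_data0 (assuming debugfs is mounted
 * at the usual /sys/kernel/debug location).
 */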
404 * We register the efi subsystem with the firmware subsystem and the
405 * efivars subsystem with the efi subsystem, if the system was booted with EFI.
408 static int __init efisubsys_init(void)
412 if (!efi_enabled(EFI_RUNTIME_SERVICES))
413 efi.runtime_supported_mask = 0;
415 if (!efi_enabled(EFI_BOOT))
418 if (efi.runtime_supported_mask) {
420 * Since we process only one efi_runtime_service() at a time, an
421 * ordered workqueue (which creates only one execution context)
422 * should suffice for all our needs.
424 efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
426 pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
427 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
428 efi.runtime_supported_mask = 0;
433 if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
434 platform_device_register_simple("rtc-efi", 0, NULL, 0);
436 /* We register the efi directory at /sys/firmware/efi */
437 efi_kobj = kobject_create_and_add("efi", firmware_kobj);
439 pr_err("efi: Firmware registration failed.\n");
444 if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
445 EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
446 error = generic_ops_register();
449 error = efivar_ssdt_load();
451 pr_err("efi: failed to load SSDT, error %d.\n", error);
452 platform_device_register_simple("efivars", 0, NULL, 0);
455 BLOCKING_INIT_NOTIFIER_HEAD(&efivar_ops_nh);
457 error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
459 pr_err("efi: Sysfs attribute export failed with error %d.\n",
464 /* and the standard mountpoint for efivarfs */
465 error = sysfs_create_mount_point(efi_kobj, "efivars");
467 pr_err("efivars: Subsystem registration failed.\n");
468 goto err_remove_group;
471 if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
474 #ifdef CONFIG_EFI_COCO_SECRET
475 if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
476 platform_device_register_simple("efi_secret", 0, NULL, 0);
482 sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
484 if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
485 EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
486 generic_ops_unregister();
488 kobject_put(efi_kobj);
492 destroy_workqueue(efi_rts_wq);
497 subsys_initcall(efisubsys_init);
499 void __init efi_find_mirror(void)
501 efi_memory_desc_t *md;
502 u64 mirror_size = 0, total_size = 0;
504 if (!efi_enabled(EFI_MEMMAP))
507 for_each_efi_memory_desc(md) {
508 unsigned long long start = md->phys_addr;
509 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
512 if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
513 memblock_mark_mirror(start, size);
518 pr_info("Memory: %lldM/%lldM mirrored memory\n",
519 mirror_size>>20, total_size>>20);
523 * Find the efi memory descriptor for a given physical address. Given a
524 * physical address, determine if it exists within an EFI Memory Map entry,
525 * and if so, populate the supplied memory descriptor with the appropriate data.
528 int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
530 efi_memory_desc_t *md;
532 if (!efi_enabled(EFI_MEMMAP)) {
533 pr_err_once("EFI_MEMMAP is not enabled.\n");
538 pr_err_once("out_md is null.\n");
542 for_each_efi_memory_desc(md) {
546 /* skip bogus entries (including empty ones) */
547 if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
548 (md->num_pages <= 0) ||
549 (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
552 size = md->num_pages << EFI_PAGE_SHIFT;
553 end = md->phys_addr + size;
554 if (phys_addr >= md->phys_addr && phys_addr < end) {
555 memcpy(out_md, md, sizeof(*out_md));
562 extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
563 __weak __alias(__efi_mem_desc_lookup);
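/*
 * Illustrative use of the lookup helper (hypothetical caller, made-up
 * variable names): find the descriptor covering a physical address and
 * report the type of the region it lives in.
 *
 *	efi_memory_desc_t md;
 *
 *	if (!efi_mem_desc_lookup(phys_addr, &md))
 *		pr_debug("0x%llx is in an EFI region of type %u\n",
 *			 (unsigned long long)phys_addr, md.type);
 */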
566 * Calculate the highest address of an efi memory descriptor.
568 u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
570 u64 size = md->num_pages << EFI_PAGE_SHIFT;
571 u64 end = md->phys_addr + size;
575 void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
578 * efi_mem_reserve - Reserve an EFI memory region
579 * @addr: Physical address to reserve
580 * @size: Size of reservation
582 * Mark a region as reserved from general kernel allocation and
583 * prevent it being released by efi_free_boot_services().
585 * This function should be called by drivers once they've parsed EFI
586 * configuration tables to figure out where their data lives, e.g.
589 void __init efi_mem_reserve(phys_addr_t addr, u64 size)
591 /* efi_mem_reserve() does not work under Xen */
592 if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
595 if (!memblock_is_region_reserved(addr, size))
596 memblock_reserve(addr, size);
599 * Some architectures (x86) reserve all boot services ranges
600 * until efi_free_boot_services() because of buggy firmware
601 * implementations. This means the above memblock_reserve() is
602 * superfluous on x86; what it needs to do instead is ensure that the
603 * region described by @addr and @size is not freed.
605 efi_arch_mem_reserve(addr, size);
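/*
 * Illustrative call site (hypothetical names): a driver that has located
 * its table through an EFI configuration table entry would typically do
 *
 *	efi_mem_reserve(table_phys_addr, table_size);
 *
 * once it knows where the data lives, so that the region is not handed
 * back to the page allocator by efi_free_boot_services().
 */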
608 static const efi_config_table_type_t common_tables[] __initconst = {
609 {ACPI_20_TABLE_GUID, &efi.acpi20, "ACPI 2.0" },
610 {ACPI_TABLE_GUID, &efi.acpi, "ACPI" },
611 {SMBIOS_TABLE_GUID, &efi.smbios, "SMBIOS" },
612 {SMBIOS3_TABLE_GUID, &efi.smbios3, "SMBIOS 3.0" },
613 {EFI_SYSTEM_RESOURCE_TABLE_GUID, &efi.esrt, "ESRT" },
614 {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, &efi_mem_attr_table, "MEMATTR" },
615 {LINUX_EFI_RANDOM_SEED_TABLE_GUID, &efi_rng_seed, "RNG" },
616 {LINUX_EFI_TPM_EVENT_LOG_GUID, &efi.tpm_log, "TPMEventLog" },
617 {EFI_TCG2_FINAL_EVENTS_TABLE_GUID, &efi.tpm_final_log, "TPMFinalLog" },
618 {EFI_CC_FINAL_EVENTS_TABLE_GUID, &efi.tpm_final_log, "CCFinalLog" },
619 {LINUX_EFI_MEMRESERVE_TABLE_GUID, &mem_reserve, "MEMRESERVE" },
620 {LINUX_EFI_INITRD_MEDIA_GUID, &initrd, "INITRD" },
621 {EFI_RT_PROPERTIES_TABLE_GUID, &rt_prop, "RTPROP" },
622 #ifdef CONFIG_EFI_RCI2_TABLE
623 {DELLEMC_EFI_RCI2_TABLE_GUID, &rci2_table_phys },
625 #ifdef CONFIG_LOAD_UEFI_KEYS
626 {LINUX_EFI_MOK_VARIABLE_TABLE_GUID, &efi.mokvar_table, "MOKvar" },
628 #ifdef CONFIG_EFI_COCO_SECRET
629 {LINUX_EFI_COCO_SECRET_AREA_GUID, &efi.coco_secret, "CocoSecret" },
631 #ifdef CONFIG_UNACCEPTED_MEMORY
632 {LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID, &efi.unaccepted, "Unaccepted" },
634 #ifdef CONFIG_EFI_GENERIC_STUB
635 {LINUX_EFI_SCREEN_INFO_TABLE_GUID, &screen_info_table },
640 static __init int match_config_table(const efi_guid_t *guid,
642 const efi_config_table_type_t *table_types)
646 for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
647 if (efi_guidcmp(*guid, table_types[i].guid))
650 if (!efi_config_table_is_usable(guid, table)) {
651 if (table_types[i].name[0])
652 pr_cont("(%s=0x%lx unusable) ",
653 table_types[i].name, table);
657 *(table_types[i].ptr) = table;
658 if (table_types[i].name[0])
659 pr_cont("%s=0x%lx ", table_types[i].name, table);
667 * reserve_unaccepted - Map and reserve unaccepted configuration table
668 * @unaccepted: Pointer to unaccepted memory table
670 * memblock_add() makes sure that the table is mapped in the direct mapping.
671 * During a normal boot this happens automatically because the table is
672 * allocated from usable memory. But during a crashkernel boot only memory
673 * specifically reserved for the crash scenario is mapped, so memblock_add()
674 * is needed to force the table to be mapped in that case.
676 * Align the range to page boundaries; ranges smaller than the page size
677 * are not going to be mapped.
679 * memblock_reserve() makes sure that future allocations will not touch the table.
683 static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
685 phys_addr_t start, size;
687 start = PAGE_ALIGN_DOWN(efi.unaccepted);
688 size = PAGE_ALIGN(sizeof(*unaccepted) + unaccepted->size);
690 memblock_add(start, size);
691 memblock_reserve(start, size);
694 int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
696 const efi_config_table_type_t *arch_tables)
698 const efi_config_table_64_t *tbl64 = (void *)config_tables;
699 const efi_config_table_32_t *tbl32 = (void *)config_tables;
700 const efi_guid_t *guid;
705 for (i = 0; i < count; i++) {
706 if (!IS_ENABLED(CONFIG_X86)) {
707 guid = &config_tables[i].guid;
708 table = (unsigned long)config_tables[i].table;
709 } else if (efi_enabled(EFI_64BIT)) {
710 guid = &tbl64[i].guid;
711 table = tbl64[i].table;
713 if (IS_ENABLED(CONFIG_X86_32) &&
714 tbl64[i].table > U32_MAX) {
716 pr_err("Table located above 4GB, disabling EFI.\n");
720 guid = &tbl32[i].guid;
721 table = tbl32[i].table;
724 if (!match_config_table(guid, table, common_tables) && arch_tables)
725 match_config_table(guid, table, arch_tables);
728 set_bit(EFI_CONFIG_TABLES, &efi.flags);
730 if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
731 struct linux_efi_random_seed *seed;
734 seed = early_memremap(efi_rng_seed, sizeof(*seed));
736 size = min_t(u32, seed->size, SZ_1K); // sanity check
737 early_memunmap(seed, sizeof(*seed));
739 pr_err("Could not map UEFI random seed!\n");
742 seed = early_memremap(efi_rng_seed,
743 sizeof(*seed) + size);
745 add_bootloader_randomness(seed->bits, size);
746 memzero_explicit(seed->bits, size);
747 early_memunmap(seed, sizeof(*seed) + size);
749 pr_err("Could not map UEFI random seed!\n");
754 if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
757 efi_tpm_eventlog_init();
759 if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
760 unsigned long prsv = mem_reserve;
763 struct linux_efi_memreserve *rsv;
767 * Just map a full page: that is what we will get
768 * anyway, and it permits us to map the entire entry
769 * before knowing its size.
771 p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
774 pr_err("Could not map UEFI memreserve entry!\n");
778 rsv = (void *)(p + prsv % PAGE_SIZE);
780 /* reserve the entry itself */
781 memblock_reserve(prsv,
782 struct_size(rsv, entry, rsv->size));
784 for (i = 0; i < atomic_read(&rsv->count); i++) {
785 memblock_reserve(rsv->entry[i].base,
790 early_memunmap(p, PAGE_SIZE);
794 if (rt_prop != EFI_INVALID_TABLE_ADDR) {
795 efi_rt_properties_table_t *tbl;
797 tbl = early_memremap(rt_prop, sizeof(*tbl));
799 efi.runtime_supported_mask &= tbl->runtime_services_supported;
800 early_memunmap(tbl, sizeof(*tbl));
804 if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
805 initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
806 struct linux_efi_initrd *tbl;
808 tbl = early_memremap(initrd, sizeof(*tbl));
810 phys_initrd_start = tbl->base;
811 phys_initrd_size = tbl->size;
812 early_memunmap(tbl, sizeof(*tbl));
816 if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
817 efi.unaccepted != EFI_INVALID_TABLE_ADDR) {
818 struct efi_unaccepted_memory *unaccepted;
820 unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
823 if (unaccepted->version == 1) {
824 reserve_unaccepted(unaccepted);
826 efi.unaccepted = EFI_INVALID_TABLE_ADDR;
829 early_memunmap(unaccepted, sizeof(*unaccepted));
836 int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
838 if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
839 pr_err("System table signature incorrect!\n");
846 static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
849 const efi_char16_t *ret;
851 ret = early_memremap_ro(fw_vendor, size);
853 pr_err("Could not map the firmware vendor!\n");
857 static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
859 early_memunmap((void *)fw_vendor, size);
862 void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
863 unsigned long fw_vendor)
865 char vendor[100] = "unknown";
866 const efi_char16_t *c16;
870 c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
872 for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
876 unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
879 rev = (u16)systab_hdr->revision;
880 pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);
886 pr_cont(" by %s\n", vendor);
888 if (IS_ENABLED(CONFIG_X86_64) &&
889 systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
890 !strcmp(vendor, "Apple")) {
891 pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
892 efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
896 static __initdata char memory_type_name[][13] = {
915 char * __init efi_md_typeattr_format(char *buf, size_t size,
916 const efi_memory_desc_t *md)
923 if (md->type >= ARRAY_SIZE(memory_type_name))
924 type_len = snprintf(pos, size, "[type=%u", md->type);
926 type_len = snprintf(pos, size, "[%-*s",
927 (int)(sizeof(memory_type_name[0]) - 1),
928 memory_type_name[md->type]);
929 if (type_len >= size)
935 attr = md->attribute;
936 if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
937 EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
938 EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
939 EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
940 EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
941 snprintf(pos, size, "|attr=0x%016llx]",
942 (unsigned long long)attr);
945 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
946 attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
947 attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
948 attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
949 attr & EFI_MEMORY_SP ? "SP" : "",
950 attr & EFI_MEMORY_NV ? "NV" : "",
951 attr & EFI_MEMORY_XP ? "XP" : "",
952 attr & EFI_MEMORY_RP ? "RP" : "",
953 attr & EFI_MEMORY_WP ? "WP" : "",
954 attr & EFI_MEMORY_RO ? "RO" : "",
955 attr & EFI_MEMORY_UCE ? "UCE" : "",
956 attr & EFI_MEMORY_WB ? "WB" : "",
957 attr & EFI_MEMORY_WT ? "WT" : "",
958 attr & EFI_MEMORY_WC ? "WC" : "",
959 attr & EFI_MEMORY_UC ? "UC" : "");
964 * efi_mem_attributes - look up memmap attributes for a physical address
965 * @phys_addr: the physical address to look up
967 * Search in the EFI memory map for the region covering
968 * @phys_addr. Returns the EFI memory attributes if the region
969 * was found in the memory map, 0 otherwise.
971 u64 efi_mem_attributes(unsigned long phys_addr)
973 efi_memory_desc_t *md;
975 if (!efi_enabled(EFI_MEMMAP))
978 for_each_efi_memory_desc(md) {
979 if ((md->phys_addr <= phys_addr) &&
980 (phys_addr < (md->phys_addr +
981 (md->num_pages << EFI_PAGE_SHIFT))))
982 return md->attribute;
988 * efi_mem_type - look up memmap type for a physical address
989 * @phys_addr: the physical address to look up
991 * Search in the EFI memory map for the region covering @phys_addr.
992 * Returns the EFI memory type if the region was found in the memory
993 * map, -EINVAL otherwise.
995 int efi_mem_type(unsigned long phys_addr)
997 const efi_memory_desc_t *md;
999 if (!efi_enabled(EFI_MEMMAP))
1002 for_each_efi_memory_desc(md) {
1003 if ((md->phys_addr <= phys_addr) &&
1004 (phys_addr < (md->phys_addr +
1005 (md->num_pages << EFI_PAGE_SHIFT))))
1011 int efi_status_to_err(efi_status_t status)
1019 case EFI_INVALID_PARAMETER:
1022 case EFI_OUT_OF_RESOURCES:
1025 case EFI_DEVICE_ERROR:
1028 case EFI_WRITE_PROTECTED:
1031 case EFI_SECURITY_VIOLATION:
1046 EXPORT_SYMBOL_GPL(efi_status_to_err);
1048 static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
1049 static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
1051 static int __init efi_memreserve_map_root(void)
1053 if (mem_reserve == EFI_INVALID_TABLE_ADDR)
1056 efi_memreserve_root = memremap(mem_reserve,
1057 sizeof(*efi_memreserve_root),
1059 if (WARN_ON_ONCE(!efi_memreserve_root))
1064 static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
1066 struct resource *res, *parent;
1069 res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
1073 res->name = "reserved";
1074 res->flags = IORESOURCE_MEM;
1076 res->end = addr + size - 1;
1078 /* we expect a conflict with a 'System RAM' region */
1079 parent = request_resource_conflict(&iomem_resource, res);
1080 ret = parent ? request_resource(parent, res) : 0;
1083 * Given that efi_mem_reserve_iomem() can be called at any
1084 * time, only call memblock_reserve() if the architecture
1085 * keeps the infrastructure around.
1087 if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
1088 memblock_reserve(addr, size);
1093 int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
1095 struct linux_efi_memreserve *rsv;
1099 if (efi_memreserve_root == (void *)ULONG_MAX)
1102 if (!efi_memreserve_root) {
1103 rc = efi_memreserve_map_root();
1108 /* first try to find a slot in an existing linked list entry */
1109 for (prsv = efi_memreserve_root->next; prsv; ) {
1110 rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
1113 index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
1114 if (index < rsv->size) {
1115 rsv->entry[index].base = addr;
1116 rsv->entry[index].size = size;
1119 return efi_mem_reserve_iomem(addr, size);
1125 /* no slot found - allocate a new linked list entry */
1126 rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
1130 rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
1132 free_page((unsigned long)rsv);
1137 * The memremap() call above assumes that a linux_efi_memreserve entry
1138 * never crosses a page boundary, so let's ensure that this remains true
1139 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
1140 * using SZ_4K explicitly in the size calculation below.
1142 rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
1143 atomic_set(&rsv->count, 1);
1144 rsv->entry[0].base = addr;
1145 rsv->entry[0].size = size;
1147 spin_lock(&efi_mem_reserve_persistent_lock);
1148 rsv->next = efi_memreserve_root->next;
1149 efi_memreserve_root->next = __pa(rsv);
1150 spin_unlock(&efi_mem_reserve_persistent_lock);
1152 return efi_mem_reserve_iomem(addr, size);
1155 static int __init efi_memreserve_root_init(void)
1157 if (efi_memreserve_root)
1159 if (efi_memreserve_map_root())
1160 efi_memreserve_root = (void *)ULONG_MAX;
1163 early_initcall(efi_memreserve_root_init);
1166 static int update_efi_random_seed(struct notifier_block *nb,
1167 unsigned long code, void *unused)
1169 struct linux_efi_random_seed *seed;
1172 if (!kexec_in_progress)
1175 seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
1177 size = min(seed->size, EFI_RANDOM_SEED_SIZE);
1180 pr_err("Could not map UEFI random seed!\n");
1183 seed = memremap(efi_rng_seed, sizeof(*seed) + size,
1187 get_random_bytes(seed->bits, seed->size);
1190 pr_err("Could not map UEFI random seed!\n");
1196 static struct notifier_block efi_random_seed_nb = {
1197 .notifier_call = update_efi_random_seed,
1200 static int __init register_update_efi_random_seed(void)
1202 if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
1204 return register_reboot_notifier(&efi_random_seed_nb);
1206 late_initcall(register_update_efi_random_seed);