/*
 * x86_64 specific EFI support functions
 * Based on Extensible Firmware Interface Specification version 1.0
 *
 * Copyright (C) 2005-2008 Intel Co.
 *
 * Code to convert the EFI memory map to an E820 map has been implemented in
 * the elilo bootloader, based on an EFI patch by Edgar Hucek.  Based on the
 * E820 map, the page table is set up appropriately for EFI runtime code.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>

static pgd_t *save_pgd __initdata;
static unsigned long efi_flags __initdata;

/*
 * We allocate runtime services regions bottom-up, starting from -4G, i.e.
 * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
 */
static u64 efi_va = -4 * (1UL << 30);
#define EFI_VA_END	(-68 * (1UL << 30))
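
/*
 * EFI_VA_END is -68 GiB (0xffff_ffef_0000_0000); together with the -4 GiB
 * start above this leaves the 64 GiB window for runtime mappings.
 */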

/*
 * Scratch space used for switching the pagetable in the EFI stub
 */
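/* Assumed field layout; it has to match the scratch area used by efi_stub_64.S. */
struct efi_scratch {
	u64	r15;		/* scratch save slot used by the call stub */
	u64	prev_cr3;	/* %cr3 to restore after the firmware call */
	pgd_t	*efi_pgt;	/* page table used while in the firmware */
	bool	use_pgd;	/* have the stub switch to efi_pgt */
};

extern struct efi_scratch efi_scratch;	/* storage is defined in efi_stub_64.S */
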
static void __init early_code_mapping_set_exec(int executable)
{
	efi_memory_desc_t *md;
	void *p;

	if (!(__supported_pte_mask & _PAGE_NX))
		return;

	/* Make EFI service code area executable */
	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		md = p;
		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
		    md->type == EFI_BOOT_SERVICES_CODE)
			efi_set_executable(md, executable);
	}
}

void __init efi_call_phys_prelog(void)
{
	unsigned long vaddress;
	int pgd;
	int n_pgds;

	if (!efi_enabled(EFI_OLD_MEMMAP))
		return;

	early_code_mapping_set_exec(1);
	local_irq_save(efi_flags);

	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
	save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
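
	/*
	 * With 4-level paging each PGD entry covers 512 GiB, so a machine
	 * with e.g. 8 GiB of RAM has n_pgds = 1: only PGD slot 0 is saved
	 * and then aliased to the corresponding direct-mapping entry, giving
	 * the firmware a 1:1 (VA == PA) view of physical memory.
	 */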
	for (pgd = 0; pgd < n_pgds; pgd++) {
		save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
	}
	__flush_tlb_all();
}

void __init efi_call_phys_epilog(void)
{
	/* Restore the original page table and free the saved PGD entries. */
	int pgd;
	int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);

	if (!efi_enabled(EFI_OLD_MEMMAP))
		return;

	for (pgd = 0; pgd < n_pgds; pgd++)
		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
	kfree(save_pgd);
	__flush_tlb_all();
	local_irq_restore(efi_flags);
	early_code_mapping_set_exec(0);
}

/* Add low kernel mappings for passing arguments to EFI functions. */
void efi_sync_low_kernel_mappings(void)
{
	unsigned num_pgds;
	pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);

	if (efi_enabled(EFI_OLD_MEMMAP))
		return;

	num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET);
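
	/*
	 * Copy the PGD entries covering the range from the direct mapping at
	 * PAGE_OFFSET up through the kernel text and module area, so that
	 * kernel-virtual pointers handed to the firmware still resolve while
	 * the EFI page table is active.
	 */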
	memcpy(pgd + pgd_index(PAGE_OFFSET),
	       init_mm.pgd + pgd_index(PAGE_OFFSET),
	       sizeof(pgd_t) * num_pgds);
}
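
/*
 * Hand the trampoline page table to the EFI call stub.  With use_pgd set,
 * the stub switches %cr3 to efi_pgt around each runtime service call and
 * restores the previous page table afterwards.
 */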
void efi_setup_page_tables(void)
{
	efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;

	if (!efi_enabled(EFI_OLD_MEMMAP))
		efi_scratch.use_pgd = true;
}

static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
	pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
	unsigned long pf = 0;

	if (!(md->attribute & EFI_MEMORY_WB))
		pf |= _PAGE_PCD;

	if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
			md->phys_addr, va);
}

void __init efi_map_region(efi_memory_desc_t *md)
{
	unsigned long size = md->num_pages << PAGE_SHIFT;
	u64 pa = md->phys_addr;

	if (efi_enabled(EFI_OLD_MEMMAP))
		return old_map_region(md);

	/*
	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
	 * firmware which doesn't update all internal pointers after switching
	 * to virtual mode and would otherwise crap on us.
	 */
	__map_region(md, md->phys_addr);

	efi_va -= size;
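
	/*
	 * Keep the runtime VA congruent with the PA modulo 2M, so that
	 * suitably aligned parts of the region can still be mapped with
	 * large pages.  E.g. a region at PA 0x12345000 sits at offset
	 * 0x145000 within its 2M page, and efi_va is placed at that same
	 * offset.
	 */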
	/* Is PA 2M-aligned? */
	if (!(pa & (PMD_SIZE - 1))) {
		efi_va &= PMD_MASK;
	} else {
		u64 pa_offset = pa & (PMD_SIZE - 1);
		u64 prev_va = efi_va;

		/* get us the same offset within this 2M page */
		efi_va = (efi_va & PMD_MASK) + pa_offset;

		if (efi_va > prev_va)
			efi_va -= PMD_SIZE;
	}

	if (efi_va < EFI_VA_END) {
		pr_warn(FW_WARN "VA address range overflow!\n");
		return;
	}

	__map_region(md, efi_va);
	md->virt_addr = efi_va;
}

/*
 * A kexec'd kernel uses efi_map_region_fixed() to map EFI runtime ranges at
 * md->virt_addr, the virtual address already established by the first kernel.
 */
void __init efi_map_region_fixed(efi_memory_desc_t *md)
{
	__map_region(md, md->virt_addr);
}

void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
				 u32 type, u64 attribute)
{
	unsigned long last_map_pfn;

	if (type == EFI_MEMORY_MAPPED_IO)
		return ioremap(phys_addr, size);

	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
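	/*
	 * init_memory_mapping() may stop short of phys_addr + size; if so,
	 * the remainder is mapped by the recursive call below.
	 */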
	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
		unsigned long top = last_map_pfn << PAGE_SHIFT;
		efi_ioremap(top, size - (top - phys_addr), type, attribute);
	}

	if (!(attribute & EFI_MEMORY_WB))
		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);

	return (void __iomem *)__va(phys_addr);
}
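
/*
 * Record where the boot loader put the SETUP_EFI setup_data payload; the
 * struct setup_data header itself is skipped.
 */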
void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
	efi_setup = phys_addr + sizeof(struct setup_data);
}