2 * Extensible Firmware Interface
4 * Based on Extensible Firmware Interface Specification version 0.9
7 * Copyright (C) 1999 VA Linux Systems
9 * Copyright (C) 1999-2003 Hewlett-Packard Co.
12 * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
15 * Not all EFI Runtime Services are implemented yet, as EFI only
16 * supports physical mode addressing on SoftSDV. This is to be fixed
17 * in a future version. --drummond 1999-07-20
19 * Implemented EFI runtime services and virtual mode calls. --davidm
22 * Skip non-WB memory and ignore empty memory ranges.
24 #include <linux/module.h>
25 #include <linux/bootmem.h>
26 #include <linux/crash_dump.h>
27 #include <linux/kernel.h>
28 #include <linux/init.h>
29 #include <linux/types.h>
30 #include <linux/slab.h>
31 #include <linux/time.h>
32 #include <linux/efi.h>
33 #include <linux/kexec.h>
37 #include <asm/kregs.h>
38 #include <asm/meminit.h>
39 #include <asm/pgtable.h>
40 #include <asm/processor.h>
42 #include <asm/setup.h>
43 #include <asm/tlbflush.h>
47 static __initdata unsigned long palo_phys;
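/*
 * Architecture-specific EFI configuration tables we care about.  When
 * efi_config_init() (called from efi_init() below) walks the firmware's
 * configuration table, a GUID match makes it store that table's
 * physical address in the variable named in the entry (palo_phys for
 * the PALO table).
 */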
49 static __initdata efi_config_table_type_t arch_tables[] = {
50 {PROCESSOR_ABSTRACTION_LAYER_OVERWRITE_GUID, "PALO", &palo_phys},
54 extern efi_status_t efi_call_phys (void *, ...);
56 static efi_runtime_services_t *runtime;
57 static u64 mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
59 #define efi_call_virt(f, args...) (*(f))(args)
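/*
 * EFI runtime services are reached through small generated wrappers:
 * each STUB_* macro below emits one wrapper per service, and every
 * wrapper is instantiated twice, once with the "phys" prefix for calls
 * made while EFI is still in physical mode and once with the "virt"
 * prefix for calls made after SetVirtualAddressMap().  The wrappers
 * save and restore the scratch floating-point registers around the
 * firmware call (firmware may clobber them, while the kernel otherwise
 * leaves user FP state in place) and run pointer arguments through
 * adjust_arg(), which lets the physical-mode variants hand the firmware
 * physical addresses via ia64_tpa().
 */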
61 #define STUB_GET_TIME(prefix, adjust_arg) \
63 prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
65 struct ia64_fpreg fr[6]; \
66 efi_time_cap_t *atc = NULL; \
70 atc = adjust_arg(tc); \
71 ia64_save_scratch_fpregs(fr); \
72 ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), \
73 adjust_arg(tm), atc); \
74 ia64_load_scratch_fpregs(fr); \
78 #define STUB_SET_TIME(prefix, adjust_arg) \
80 prefix##_set_time (efi_time_t *tm) \
82 struct ia64_fpreg fr[6]; \
85 ia64_save_scratch_fpregs(fr); \
86 ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), \
87 adjust_arg(tm)); \
88 ia64_load_scratch_fpregs(fr); \
92 #define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
94 prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, \
97 struct ia64_fpreg fr[6]; \
100 ia64_save_scratch_fpregs(fr); \
101 ret = efi_call_##prefix( \
102 (efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
103 adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
104 ia64_load_scratch_fpregs(fr); \
108 #define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
109 static efi_status_t \
110 prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
112 struct ia64_fpreg fr[6]; \
113 efi_time_t *atm = NULL; \
117 atm = adjust_arg(tm); \
118 ia64_save_scratch_fpregs(fr); \
119 ret = efi_call_##prefix( \
120 (efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
121 enabled, atm); \
122 ia64_load_scratch_fpregs(fr); \
126 #define STUB_GET_VARIABLE(prefix, adjust_arg) \
127 static efi_status_t \
128 prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
129 unsigned long *data_size, void *data) \
131 struct ia64_fpreg fr[6]; \
136 aattr = adjust_arg(attr); \
137 ia64_save_scratch_fpregs(fr); \
138 ret = efi_call_##prefix( \
139 (efi_get_variable_t *) __va(runtime->get_variable), \
140 adjust_arg(name), adjust_arg(vendor), aattr, \
141 adjust_arg(data_size), adjust_arg(data)); \
142 ia64_load_scratch_fpregs(fr); \
146 #define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
147 static efi_status_t \
148 prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, \
149 efi_guid_t *vendor) \
151 struct ia64_fpreg fr[6]; \
154 ia64_save_scratch_fpregs(fr); \
155 ret = efi_call_##prefix( \
156 (efi_get_next_variable_t *) __va(runtime->get_next_variable), \
157 adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
158 ia64_load_scratch_fpregs(fr); \
162 #define STUB_SET_VARIABLE(prefix, adjust_arg) \
163 static efi_status_t \
164 prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, \
165 u32 attr, unsigned long data_size, \
168 struct ia64_fpreg fr[6]; \
171 ia64_save_scratch_fpregs(fr); \
172 ret = efi_call_##prefix( \
173 (efi_set_variable_t *) __va(runtime->set_variable), \
174 adjust_arg(name), adjust_arg(vendor), attr, data_size, \
175 adjust_arg(data)); \
176 ia64_load_scratch_fpregs(fr); \
180 #define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
181 static efi_status_t \
182 prefix##_get_next_high_mono_count (u32 *count) \
184 struct ia64_fpreg fr[6]; \
187 ia64_save_scratch_fpregs(fr); \
188 ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
189 __va(runtime->get_next_high_mono_count), \
190 adjust_arg(count)); \
191 ia64_load_scratch_fpregs(fr); \
195 #define STUB_RESET_SYSTEM(prefix, adjust_arg) \
197 prefix##_reset_system (int reset_type, efi_status_t status, \
198 unsigned long data_size, efi_char16_t *data) \
200 struct ia64_fpreg fr[6]; \
201 efi_char16_t *adata = NULL; \
204 adata = adjust_arg(data); \
206 ia64_save_scratch_fpregs(fr); \
207 efi_call_##prefix( \
208 (efi_reset_system_t *) __va(runtime->reset_system), \
209 reset_type, status, data_size, adata); \
210 /* should not return, but just in case... */ \
211 ia64_load_scratch_fpregs(fr); \
214 #define phys_ptr(arg) ((__typeof__(arg)) ia64_tpa(arg))
216 STUB_GET_TIME(phys, phys_ptr)
217 STUB_SET_TIME(phys, phys_ptr)
218 STUB_GET_WAKEUP_TIME(phys, phys_ptr)
219 STUB_SET_WAKEUP_TIME(phys, phys_ptr)
220 STUB_GET_VARIABLE(phys, phys_ptr)
221 STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
222 STUB_SET_VARIABLE(phys, phys_ptr)
223 STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
224 STUB_RESET_SYSTEM(phys, phys_ptr)
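/*
 * For illustration, STUB_GET_TIME(phys, phys_ptr) above expands to
 * roughly the following (sketch only, line continuations and some
 * declarations elided):
 *
 *	static efi_status_t
 *	phys_get_time (efi_time_t *tm, efi_time_cap_t *tc)
 *	{
 *		struct ia64_fpreg fr[6];
 *		efi_time_cap_t *atc = NULL;
 *		efi_status_t ret;
 *
 *		if (tc)
 *			atc = phys_ptr(tc);
 *		ia64_save_scratch_fpregs(fr);
 *		ret = efi_call_phys((efi_get_time_t *) __va(runtime->get_time),
 *				    phys_ptr(tm), atc);
 *		ia64_load_scratch_fpregs(fr);
 *		return ret;
 *	}
 */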
228 STUB_GET_TIME(virt, id)
229 STUB_SET_TIME(virt, id)
230 STUB_GET_WAKEUP_TIME(virt, id)
231 STUB_SET_WAKEUP_TIME(virt, id)
232 STUB_GET_VARIABLE(virt, id)
233 STUB_GET_NEXT_VARIABLE(virt, id)
234 STUB_SET_VARIABLE(virt, id)
235 STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
236 STUB_RESET_SYSTEM(virt, id)
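/*
 * Read the EFI real-time clock into a timespec.  On any failure the
 * timespec is zeroed so the caller can tell that nothing useful was
 * read; this is what, e.g., read_persistent_clock() relies on when it
 * seeds the system time from firmware.
 */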
239 efi_gettimeofday (struct timespec *ts)
243 if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS) {
244 memset(ts, 0, sizeof(*ts));
248 ts->tv_sec = mktime(tm.year, tm.month, tm.day,
249 tm.hour, tm.minute, tm.second);
250 ts->tv_nsec = tm.nanosecond;
254 is_memory_available (efi_memory_desc_t *md)
256 if (!(md->attribute & EFI_MEMORY_WB))
260 case EFI_LOADER_CODE:
261 case EFI_LOADER_DATA:
262 case EFI_BOOT_SERVICES_CODE:
263 case EFI_BOOT_SERVICES_DATA:
264 case EFI_CONVENTIONAL_MEMORY:
270 typedef struct kern_memdesc {
276 static kern_memdesc_t *kern_memmap;
278 #define efi_md_size(md) (md->num_pages << EFI_PAGE_SHIFT)
281 kmd_end(kern_memdesc_t *kmd)
283 return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
287 efi_md_end(efi_memory_desc_t *md)
289 return (md->phys_addr + efi_md_size(md));
293 efi_wb(efi_memory_desc_t *md)
295 return (md->attribute & EFI_MEMORY_WB);
299 efi_uc(efi_memory_desc_t *md)
301 return (md->attribute & EFI_MEMORY_UC);
305 walk (efi_freemem_callback_t callback, void *arg, u64 attr)
308 u64 start, end, voff;
310 voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET;
311 for (k = kern_memmap; k->start != ~0UL; k++) {
312 if (k->attribute != attr)
314 start = PAGE_ALIGN(k->start);
315 end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK;
317 if ((*callback)(start + voff, end + voff, arg) < 0)
323 * Walk the EFI memory map and call CALLBACK once for each EFI memory
324 * descriptor that has memory that is available for OS use.
327 efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
329 walk(callback, arg, EFI_MEMORY_WB);
333 * Walk the EFI memory map and call CALLBACK once for each EFI memory
334 * descriptor that has memory that is available for uncached allocator.
337 efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
339 walk(callback, arg, EFI_MEMORY_UC);
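/*
 * Usage sketch (hypothetical callback, for illustration only): the
 * callback passed to efi_memmap_walk()/efi_memmap_walk_uc() receives
 * the virtual start and end of each usable range plus the opaque arg,
 * and may return a negative value to stop the walk early.  A caller
 * that just counts pages might look like:
 *
 *	static int __init
 *	count_free_pages (u64 start, u64 end, void *arg)
 *	{
 *		unsigned long *count = arg;
 *
 *		*count += (end - start) >> PAGE_SHIFT;
 *		return 0;
 *	}
 *
 *	...
 *	unsigned long nr_pages = 0;
 *	efi_memmap_walk(count_free_pages, &nr_pages);
 */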
343 * Look for the PAL_CODE region reported by EFI and map it using an
344 * ITR to enable safe PAL calls in virtual mode. See IA-64 Processor
345 * Abstraction Layer chapter 11 in ADAG
348 efi_get_pal_addr (void)
350 void *efi_map_start, *efi_map_end, *p;
351 efi_memory_desc_t *md;
353 int pal_code_count = 0;
356 efi_map_start = __va(ia64_boot_param->efi_memmap);
357 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
358 efi_desc_size = ia64_boot_param->efi_memdesc_size;
360 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
362 if (md->type != EFI_PAL_CODE)
365 if (++pal_code_count > 1) {
366 printk(KERN_ERR "Too many EFI PAL Code memory ranges, "
367 "dropped @ %llx\n", md->phys_addr);
371 * The only ITLB entry in region 7 that is used is the one
372 * installed by __start(). That entry covers a 64MB range.
374 mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
375 vaddr = PAGE_OFFSET + md->phys_addr;
378 * We must check that the PAL mapping won't overlap with the
381 * PAL code is guaranteed to be aligned on a power of 2 between
382 * 4k and 256KB and that only one ITR is needed to map it. This
383 * implies that the PAL code is always aligned on its size,
384 * i.e., the closest matching page size supported by the TLB.
385 * Therefore PAL code is guaranteed never to cross a 64MB boundary
386 * unless it is bigger than 64MB (very unlikely!). So for now the
387 * following test is enough to determine whether or not we need
388 * a dedicated ITR for the PAL code.
390 if ((vaddr & mask) == (KERNEL_START & mask)) {
391 printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
396 if (efi_md_size(md) > IA64_GRANULE_SIZE)
397 panic("Whoa! PAL code size bigger than a granule!");
400 mask = ~((1 << IA64_GRANULE_SHIFT) - 1);
402 printk(KERN_INFO "CPU %d: mapping PAL code "
403 "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
404 smp_processor_id(), md->phys_addr,
405 md->phys_addr + efi_md_size(md),
406 vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
408 return __va(md->phys_addr);
410 printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
416 static u8 __init palo_checksum(u8 *buffer, u32 length)
419 u8 *end = buffer + length;
422 sum = (u8) (sum + *(buffer++));
428 * Parse and handle PALO table which is published at:
429 * http://www.dig64.org/home/DIG64_PALO_R1_0.pdf
431 static void __init handle_palo(unsigned long phys_addr)
433 struct palo_table *palo = __va(phys_addr);
436 if (strncmp(palo->signature, PALO_SIG, sizeof(PALO_SIG) - 1)) {
437 printk(KERN_INFO "PALO signature incorrect.\n");
441 checksum = palo_checksum((u8 *)palo, palo->length);
443 printk(KERN_INFO "PALO checksum incorrect.\n");
447 setup_ptcg_sem(palo->max_tlb_purges, NPTCG_FROM_PALO);
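/*
 * Note: the max_tlb_purges value handed to setup_ptcg_sem() above is
 * the platform's limit on concurrently outstanding global TLB purges
 * (ptc.g); recording it lets the TLB-flush code serialize ptc.g issue
 * accordingly.
 */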
451 efi_map_pal_code (void)
453 void *pal_vaddr = efi_get_pal_addr ();
460 * Cannot write to CRx with PSR.ic=1
462 psr = ia64_clear_ic();
463 ia64_itr(0x1, IA64_TR_PALCODE,
464 GRANULEROUNDDOWN((unsigned long) pal_vaddr),
465 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
467 paravirt_dv_serialize_data();
468 ia64_set_psr(psr); /* restore psr */
474 void *efi_map_start, *efi_map_end;
477 char *cp, vendor[100] = "unknown";
480 set_bit(EFI_BOOT, &efi.flags);
481 set_bit(EFI_64BIT, &efi.flags);
484 * It's too early to be able to use the standard kernel command line
487 for (cp = boot_command_line; *cp; ) {
488 if (memcmp(cp, "mem=", 4) == 0) {
489 mem_limit = memparse(cp + 4, &cp);
490 } else if (memcmp(cp, "max_addr=", 9) == 0) {
491 max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
492 } else if (memcmp(cp, "min_addr=", 9) == 0) {
493 min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
495 while (*cp != ' ' && *cp)
502 printk(KERN_INFO "Ignoring memory below %lluMB\n",
504 if (max_addr != ~0UL)
505 printk(KERN_INFO "Ignoring memory above %lluMB\n",
508 efi.systab = __va(ia64_boot_param->efi_systab);
511 * Verify the EFI Table
513 if (efi.systab == NULL)
514 panic("Whoa! Can't find EFI system table.\n");
515 if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
516 panic("Whoa! EFI system table signature incorrect\n");
517 if ((efi.systab->hdr.revision >> 16) == 0)
518 printk(KERN_WARNING "Warning: EFI system table version "
519 "%d.%02d, expected 1.00 or greater\n",
520 efi.systab->hdr.revision >> 16,
521 efi.systab->hdr.revision & 0xffff);
523 /* Show what we know for posterity */
524 c16 = __va(efi.systab->fw_vendor);
526 for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
531 printk(KERN_INFO "EFI v%u.%.02u by %s:",
532 efi.systab->hdr.revision >> 16,
533 efi.systab->hdr.revision & 0xffff, vendor);
535 set_bit(EFI_SYSTEM_TABLES, &efi.flags);
537 palo_phys = EFI_INVALID_TABLE_ADDR;
539 if (efi_config_init(arch_tables) != 0)
542 if (palo_phys != EFI_INVALID_TABLE_ADDR)
543 handle_palo(palo_phys);
545 runtime = __va(efi.systab->runtime);
546 efi.get_time = phys_get_time;
547 efi.set_time = phys_set_time;
548 efi.get_wakeup_time = phys_get_wakeup_time;
549 efi.set_wakeup_time = phys_set_wakeup_time;
550 efi.get_variable = phys_get_variable;
551 efi.get_next_variable = phys_get_next_variable;
552 efi.set_variable = phys_set_variable;
553 efi.get_next_high_mono_count = phys_get_next_high_mono_count;
554 efi.reset_system = phys_reset_system;
556 efi_map_start = __va(ia64_boot_param->efi_memmap);
557 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
558 efi_desc_size = ia64_boot_param->efi_memdesc_size;
561 /* print EFI memory map: */
563 efi_memory_desc_t *md;
566 for (i = 0, p = efi_map_start; p < efi_map_end;
567 ++i, p += efi_desc_size)
573 size = md->num_pages << EFI_PAGE_SHIFT;
575 if ((size >> 40) > 0) {
578 } else if ((size >> 30) > 0) {
581 } else if ((size >> 20) > 0) {
589 printk("mem%02d: type=%2u, attr=0x%016lx, "
590 "range=[0x%016lx-0x%016lx) (%4lu%s)\n",
591 i, md->type, md->attribute, md->phys_addr,
592 md->phys_addr + efi_md_size(md), size, unit);
598 efi_enter_virtual_mode();
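/*
 * Switch the EFI runtime services from physical to virtual addressing.
 * SetVirtualAddressMap() may be invoked only once: for every
 * EFI_MEMORY_RUNTIME descriptor we pick a kernel virtual mapping,
 * hand the updated memory map back to the firmware, and from then on
 * go through the virt_* wrappers instead of the phys_* ones.
 */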
602 efi_enter_virtual_mode (void)
604 void *efi_map_start, *efi_map_end, *p;
605 efi_memory_desc_t *md;
609 efi_map_start = __va(ia64_boot_param->efi_memmap);
610 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
611 efi_desc_size = ia64_boot_param->efi_memdesc_size;
613 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
615 if (md->attribute & EFI_MEMORY_RUNTIME) {
617 * Some descriptors have multiple bits set, so the
618 * order of the tests is relevant.
620 if (md->attribute & EFI_MEMORY_WB) {
621 md->virt_addr = (u64) __va(md->phys_addr);
622 } else if (md->attribute & EFI_MEMORY_UC) {
623 md->virt_addr = (u64) ioremap(md->phys_addr, 0);
624 } else if (md->attribute & EFI_MEMORY_WC) {
626 md->virt_addr = ia64_remap(md->phys_addr,
634 printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
635 md->virt_addr = (u64) ioremap(md->phys_addr, 0);
637 } else if (md->attribute & EFI_MEMORY_WT) {
639 md->virt_addr = ia64_remap(md->phys_addr,
647 printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
648 md->virt_addr = (u64) ioremap(md->phys_addr, 0);
654 status = efi_call_phys(__va(runtime->set_virtual_address_map),
655 ia64_boot_param->efi_memmap_size,
657 ia64_boot_param->efi_memdesc_version,
658 ia64_boot_param->efi_memmap);
659 if (status != EFI_SUCCESS) {
660 printk(KERN_WARNING "warning: unable to switch EFI into "
661 "virtual mode (status=%lu)\n", status);
665 set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
668 * Now that EFI is in virtual mode, we call the EFI functions more
671 efi.get_time = virt_get_time;
672 efi.set_time = virt_set_time;
673 efi.get_wakeup_time = virt_get_wakeup_time;
674 efi.set_wakeup_time = virt_set_wakeup_time;
675 efi.get_variable = virt_get_variable;
676 efi.get_next_variable = virt_get_next_variable;
677 efi.set_variable = virt_set_variable;
678 efi.get_next_high_mono_count = virt_get_next_high_mono_count;
679 efi.reset_system = virt_reset_system;
683 * Walk the EFI memory map looking for the I/O port range. There can only be
684 * one entry of this type; other I/O port ranges should be described via ACPI.
687 efi_get_iobase (void)
689 void *efi_map_start, *efi_map_end, *p;
690 efi_memory_desc_t *md;
693 efi_map_start = __va(ia64_boot_param->efi_memmap);
694 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
695 efi_desc_size = ia64_boot_param->efi_memdesc_size;
697 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
699 if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
700 if (md->attribute & EFI_MEMORY_UC)
701 return md->phys_addr;
707 static struct kern_memdesc *
708 kern_memory_descriptor (unsigned long phys_addr)
710 struct kern_memdesc *md;
712 for (md = kern_memmap; md->start != ~0UL; md++) {
713 if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
719 static efi_memory_desc_t *
720 efi_memory_descriptor (unsigned long phys_addr)
722 void *efi_map_start, *efi_map_end, *p;
723 efi_memory_desc_t *md;
726 efi_map_start = __va(ia64_boot_param->efi_memmap);
727 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
728 efi_desc_size = ia64_boot_param->efi_memdesc_size;
730 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
733 if (phys_addr - md->phys_addr < efi_md_size(md))
740 efi_memmap_intersects (unsigned long phys_addr, unsigned long size)
742 void *efi_map_start, *efi_map_end, *p;
743 efi_memory_desc_t *md;
747 efi_map_start = __va(ia64_boot_param->efi_memmap);
748 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
749 efi_desc_size = ia64_boot_param->efi_memdesc_size;
751 end = phys_addr + size;
753 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
755 if (md->phys_addr < end && efi_md_end(md) > phys_addr)
762 efi_mem_type (unsigned long phys_addr)
764 efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
772 efi_mem_attributes (unsigned long phys_addr)
774 efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
777 return md->attribute;
780 EXPORT_SYMBOL(efi_mem_attributes);
783 efi_mem_attribute (unsigned long phys_addr, unsigned long size)
785 unsigned long end = phys_addr + size;
786 efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
793 * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells
794 * the kernel that firmware needs this region mapped.
796 attr = md->attribute & ~EFI_MEMORY_RUNTIME;
798 unsigned long md_end = efi_md_end(md);
803 md = efi_memory_descriptor(md_end);
804 if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
807 return 0; /* never reached */
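/*
 * Illustrative use of efi_mem_attribute() (hypothetical caller): before
 * setting up a cacheable mapping of a firmware-described range, one
 * could check that the whole range is uniformly write-back:
 *
 *	if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
 *		... safe to map the range cacheable ...
 *
 * The descriptor walk above is what provides the "uniformly": the
 * attribute is reported only if every descriptor covering
 * [phys_addr, phys_addr + size) agrees.
 */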
811 kern_mem_attribute (unsigned long phys_addr, unsigned long size)
813 unsigned long end = phys_addr + size;
814 struct kern_memdesc *md;
818 * This is a hack for ioremap calls before we set up kern_memmap.
819 * Maybe we should do efi_memmap_init() earlier instead.
822 attr = efi_mem_attribute(phys_addr, size);
823 if (attr & EFI_MEMORY_WB)
824 return EFI_MEMORY_WB;
828 md = kern_memory_descriptor(phys_addr);
832 attr = md->attribute;
834 unsigned long md_end = kmd_end(md);
839 md = kern_memory_descriptor(md_end);
840 if (!md || md->attribute != attr)
843 return 0; /* never reached */
845 EXPORT_SYMBOL(kern_mem_attribute);
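/*
 * kern_mem_attribute() differs from efi_mem_attribute() in that it
 * consults kern_memmap, i.e. the granule-trimmed view of memory that
 * the kernel actually identity-maps, rather than the raw firmware map.
 * That is why the /dev/mem access checks below prefer it; see
 * Documentation/ia64/aliasing.txt for the background.
 */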
848 valid_phys_addr_range (phys_addr_t phys_addr, unsigned long size)
853 * /dev/mem reads and writes use copy_to_user(), which implicitly
854 * uses a granule-sized kernel identity mapping. It's really
855 * only safe to do this for regions in kern_memmap. For more
856 * details, see Documentation/ia64/aliasing.txt.
858 attr = kern_mem_attribute(phys_addr, size);
859 if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
865 valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
867 unsigned long phys_addr = pfn << PAGE_SHIFT;
870 attr = efi_mem_attribute(phys_addr, size);
873 * /dev/mem mmap uses normal user pages, so we don't need the entire
874 * granule, but the entire region we're mapping must support the same
877 if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
881 * Intel firmware doesn't tell us about all the MMIO regions, so
882 * in general we have to allow mmap requests. But if EFI *does*
883 * tell us about anything inside this region, we should deny it.
884 * The user can always map a smaller region to avoid the overlap.
886 if (efi_memmap_intersects(phys_addr, size))
893 phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
896 unsigned long phys_addr = pfn << PAGE_SHIFT;
900 * For /dev/mem mmap, we use user mappings, but if the region is
901 * in kern_memmap (and hence may be covered by a kernel mapping),
902 * we must use the same attribute as the kernel mapping.
904 attr = kern_mem_attribute(phys_addr, size);
905 if (attr & EFI_MEMORY_WB)
906 return pgprot_cacheable(vma_prot);
907 else if (attr & EFI_MEMORY_UC)
908 return pgprot_noncached(vma_prot);
911 * Some chipsets don't support UC access to memory. If
912 * WB is supported, we prefer that.
914 if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
915 return pgprot_cacheable(vma_prot);
917 return pgprot_noncached(vma_prot);
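/*
 * Walk the device paths stored in the EFI "ConOut" variable to decide
 * whether every configured console output device is a UART, in which
 * case the caller may assume a serial-only console.
 */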
921 efi_uart_console_only(void)
924 char *s, name[] = "ConOut";
925 efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
926 efi_char16_t *utf16, name_utf16[32];
927 unsigned char data[1024];
928 unsigned long size = sizeof(data);
929 struct efi_generic_dev_path *hdr, *end_addr;
932 /* Convert to UTF-16 */
936 *utf16++ = *s++ & 0x7f;
939 status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
940 if (status != EFI_SUCCESS) {
941 printk(KERN_ERR "No EFI %s variable?\n", name);
945 hdr = (struct efi_generic_dev_path *) data;
946 end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
947 while (hdr < end_addr) {
948 if (hdr->type == EFI_DEV_MSG &&
949 hdr->sub_type == EFI_DEV_MSG_UART)
951 else if (hdr->type == EFI_DEV_END_PATH ||
952 hdr->type == EFI_DEV_END_PATH2) {
955 if (hdr->sub_type == EFI_DEV_END_ENTIRE)
959 hdr = (struct efi_generic_dev_path *)((u8 *) hdr + hdr->length);
961 printk(KERN_ERR "Malformed %s value\n", name);
966 * Look for the first granule-aligned memory descriptor that is big
967 * enough to hold the EFI memory map. Make sure this descriptor is
968 * at least granule sized so it does not get trimmed.
970 struct kern_memdesc *
971 find_memmap_space (void)
973 u64 contig_low=0, contig_high=0;
975 void *efi_map_start, *efi_map_end, *p, *q;
976 efi_memory_desc_t *md, *pmd = NULL, *check_md;
977 u64 space_needed, efi_desc_size;
978 unsigned long total_mem = 0;
980 efi_map_start = __va(ia64_boot_param->efi_memmap);
981 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
982 efi_desc_size = ia64_boot_param->efi_memdesc_size;
985 * Worst case: we need 3 kernel descriptors for each efi descriptor
986 * (if every entry has a WB part in the middle, and UC head and tail),
987 * plus one for the end marker.
989 space_needed = sizeof(kern_memdesc_t) *
990 (3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);
992 for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
997 if (pmd == NULL || !efi_wb(pmd) ||
998 efi_md_end(pmd) != md->phys_addr) {
999 contig_low = GRANULEROUNDUP(md->phys_addr);
1000 contig_high = efi_md_end(md);
1001 for (q = p + efi_desc_size; q < efi_map_end;
1002 q += efi_desc_size) {
1004 if (!efi_wb(check_md))
1006 if (contig_high != check_md->phys_addr)
1008 contig_high = efi_md_end(check_md);
1010 contig_high = GRANULEROUNDDOWN(contig_high);
1012 if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
1015 /* Round ends inward to granule boundaries */
1016 as = max(contig_low, md->phys_addr);
1017 ae = min(contig_high, efi_md_end(md));
1019 /* keep within max_addr= and min_addr= command line arg */
1020 as = max(as, min_addr);
1021 ae = min(ae, max_addr);
1025 /* avoid going over mem= command line arg */
1026 if (total_mem + (ae - as) > mem_limit)
1027 ae -= total_mem + (ae - as) - mem_limit;
1032 if (ae - as > space_needed)
1035 if (p >= efi_map_end)
1036 panic("Can't allocate space for kernel memory descriptors");
1042 * Walk the EFI memory map and gather all memory available for kernel
1043 * to use. We can allocate partial granules only if the unavailable
1044 * parts exist, and are WB.
1047 efi_memmap_init(u64 *s, u64 *e)
1049 struct kern_memdesc *k, *prev = NULL;
1050 u64 contig_low=0, contig_high=0;
1052 void *efi_map_start, *efi_map_end, *p, *q;
1053 efi_memory_desc_t *md, *pmd = NULL, *check_md;
1055 unsigned long total_mem = 0;
1057 k = kern_memmap = find_memmap_space();
1059 efi_map_start = __va(ia64_boot_param->efi_memmap);
1060 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
1061 efi_desc_size = ia64_boot_param->efi_memdesc_size;
1063 for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
1067 (md->type == EFI_CONVENTIONAL_MEMORY ||
1068 md->type == EFI_BOOT_SERVICES_DATA)) {
1069 k->attribute = EFI_MEMORY_UC;
1070 k->start = md->phys_addr;
1071 k->num_pages = md->num_pages;
1076 if (pmd == NULL || !efi_wb(pmd) ||
1077 efi_md_end(pmd) != md->phys_addr) {
1078 contig_low = GRANULEROUNDUP(md->phys_addr);
1079 contig_high = efi_md_end(md);
1080 for (q = p + efi_desc_size; q < efi_map_end;
1081 q += efi_desc_size) {
1083 if (!efi_wb(check_md))
1085 if (contig_high != check_md->phys_addr)
1087 contig_high = efi_md_end(check_md);
1089 contig_high = GRANULEROUNDDOWN(contig_high);
1091 if (!is_memory_available(md))
1095 * Round ends inward to granule boundaries
1096 * Give trimmings to uncached allocator
1098 if (md->phys_addr < contig_low) {
1099 lim = min(efi_md_end(md), contig_low);
1101 if (k > kern_memmap &&
1102 (k-1)->attribute == EFI_MEMORY_UC &&
1103 kmd_end(k-1) == md->phys_addr) {
1105 (lim - md->phys_addr)
1108 k->attribute = EFI_MEMORY_UC;
1109 k->start = md->phys_addr;
1110 k->num_pages = (lim - md->phys_addr)
1119 if (efi_md_end(md) > contig_high) {
1120 lim = max(md->phys_addr, contig_high);
1122 if (lim == md->phys_addr && k > kern_memmap &&
1123 (k-1)->attribute == EFI_MEMORY_UC &&
1124 kmd_end(k-1) == md->phys_addr) {
1125 (k-1)->num_pages += md->num_pages;
1127 k->attribute = EFI_MEMORY_UC;
1129 k->num_pages = (efi_md_end(md) - lim)
1136 ae = efi_md_end(md);
1138 /* keep within max_addr= and min_addr= command line arg */
1139 as = max(as, min_addr);
1140 ae = min(ae, max_addr);
1144 /* avoid going over mem= command line arg */
1145 if (total_mem + (ae - as) > mem_limit)
1146 ae -= total_mem + (ae - as) - mem_limit;
1150 if (prev && kmd_end(prev) == md->phys_addr) {
1151 prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
1152 total_mem += ae - as;
1155 k->attribute = EFI_MEMORY_WB;
1157 k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
1158 total_mem += ae - as;
1161 k->start = ~0L; /* end-marker */
1163 /* reserve the memory we are using for kern_memmap */
1164 *s = (u64)kern_memmap;
1171 efi_initialize_iomem_resources(struct resource *code_resource,
1172 struct resource *data_resource,
1173 struct resource *bss_resource)
1175 struct resource *res;
1176 void *efi_map_start, *efi_map_end, *p;
1177 efi_memory_desc_t *md;
1180 unsigned long flags;
1182 efi_map_start = __va(ia64_boot_param->efi_memmap);
1183 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
1184 efi_desc_size = ia64_boot_param->efi_memdesc_size;
1188 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
1191 if (md->num_pages == 0) /* should not happen */
1194 flags = IORESOURCE_MEM | IORESOURCE_BUSY;
1197 case EFI_MEMORY_MAPPED_IO:
1198 case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
1201 case EFI_LOADER_CODE:
1202 case EFI_LOADER_DATA:
1203 case EFI_BOOT_SERVICES_DATA:
1204 case EFI_BOOT_SERVICES_CODE:
1205 case EFI_CONVENTIONAL_MEMORY:
1206 if (md->attribute & EFI_MEMORY_WP) {
1207 name = "System ROM";
1208 flags |= IORESOURCE_READONLY;
1209 } else if (md->attribute == EFI_MEMORY_UC)
1210 name = "Uncached RAM";
1212 name = "System RAM";
1215 case EFI_ACPI_MEMORY_NVS:
1216 name = "ACPI Non-volatile Storage";
1219 case EFI_UNUSABLE_MEMORY:
1221 flags |= IORESOURCE_DISABLED;
1224 case EFI_RESERVED_TYPE:
1225 case EFI_RUNTIME_SERVICES_CODE:
1226 case EFI_RUNTIME_SERVICES_DATA:
1227 case EFI_ACPI_RECLAIM_MEMORY:
1233 if ((res = kzalloc(sizeof(struct resource),
1234 GFP_KERNEL)) == NULL) {
1236 "failed to allocate resource for iomem\n");
1241 res->start = md->phys_addr;
1242 res->end = md->phys_addr + efi_md_size(md) - 1;
1245 if (insert_resource(&iomem_resource, res) < 0)
1249 * We don't know which region contains
1250 * kernel data so we try it repeatedly and
1251 * let the resource manager test it.
1253 insert_resource(res, code_resource);
1254 insert_resource(res, data_resource);
1255 insert_resource(res, bss_resource);
1257 insert_resource(res, &efi_memmap_res);
1258 insert_resource(res, &boot_param_res);
1259 if (crashk_res.end > crashk_res.start)
1260 insert_resource(res, &crashk_res);
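/*
 * The resources inserted above are what ends up under /proc/iomem.  A
 * purely illustrative layout (addresses and sizes invented for the
 * example) could look like:
 *
 *	00000000-3fffffff : System RAM
 *	  04000000-044fffff : Kernel code
 *	  04500000-046dffff : Kernel data
 *	  046e0000-047fffff : Kernel bss
 *
 * where the nested entries come from the insert_resource() calls for
 * code_resource, data_resource and bss_resource.
 */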
1267 /* Find a block of memory aligned to 64M, excluding reserved regions;
1268 * rsvd_regions are sorted. */
1270 unsigned long __init
1271 kdump_find_rsvd_region (unsigned long size, struct rsvd_region *r, int n)
1275 u64 alignment = 1UL << _PAGE_SIZE_64M;
1276 void *efi_map_start, *efi_map_end, *p;
1277 efi_memory_desc_t *md;
1280 efi_map_start = __va(ia64_boot_param->efi_memmap);
1281 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
1282 efi_desc_size = ia64_boot_param->efi_memdesc_size;
1284 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
1288 start = ALIGN(md->phys_addr, alignment);
1289 end = efi_md_end(md);
1290 for (i = 0; i < n; i++) {
1291 if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
1292 if (__pa(r[i].start) > start + size)
1294 start = ALIGN(__pa(r[i].end), alignment);
1296 __pa(r[i+1].start) < start + size)
1302 if (end > start + size)
1307 "Cannot reserve 0x%lx byte of memory for crashdump\n", size);
1312 #ifdef CONFIG_CRASH_DUMP
1313 /* locate the size of the descriptor at a certain address */
1314 unsigned long __init
1315 vmcore_find_descriptor_size (unsigned long address)
1317 void *efi_map_start, *efi_map_end, *p;
1318 efi_memory_desc_t *md;
1320 unsigned long ret = 0;
1322 efi_map_start = __va(ia64_boot_param->efi_memmap);
1323 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
1324 efi_desc_size = ia64_boot_param->efi_memdesc_size;
1326 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
1328 if (efi_wb(md) && md->type == EFI_LOADER_DATA
1329 && md->phys_addr == address) {
1330 ret = efi_md_size(md);
1336 printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n");