// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/ioremap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/pgtable.h>
#include <linux/kmsan.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/memtype.h>
#include <asm/setup.h>

#include "physaddr.h"
/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc {
	unsigned int flags;
};
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
/* Does the range (or a subset of) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return 0;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return IORES_MAP_SYSTEM_RAM;
	}

	return 0;
}
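/*
 * Worked example of the rounding above (illustration only, not from the
 * original source): for a resource spanning 0x1800..0x37ff with 4K pages,
 * start_pfn = (0x1800 + 0xfff) >> 12 = 2 and stop_pfn = 0x3800 >> 12 = 3,
 * so only the one page lying fully inside the resource (pfn 2, i.e.
 * 0x2000..0x2fff) is checked; partial pages at either end are skipped.
 */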
/*
 * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
 * there the whole memory is already encrypted.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return 0;

	switch (res->desc) {
	case IORES_DESC_NONE:
	case IORES_DESC_RESERVED:
		break;
	default:
		return IORES_MAP_ENCRYPTED;
	}

	return 0;
}
/*
 * The EFI runtime services data area is not covered by walk_mem_res(), but must
 * be mapped encrypted when SEV is active.
 */
static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	if (x86_platform.hyper.is_private_mmio(addr)) {
		desc->flags |= IORES_MAP_ENCRYPTED;
		return;
	}

	if (!IS_ENABLED(CONFIG_EFI))
		return;

	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
	    (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
	     efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
		desc->flags |= IORES_MAP_ENCRYPTED;
}
static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
	struct ioremap_desc *desc = arg;

	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
		desc->flags |= __ioremap_check_ram(res);

	if (!(desc->flags & IORES_MAP_ENCRYPTED))
		desc->flags |= __ioremap_check_encrypted(res);

	/* Stop the resource walk once both flags have been collected. */
	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
		(IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
}
/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY and looks for system RAM and/or a
 * resource not described as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
 *
 * After that, deal with misc other ranges in __ioremap_check_other() which do
 * not fall into the above category.
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(desc, 0, sizeof(struct ioremap_desc));

	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);

	__ioremap_check_other(addr, desc);
}
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mapping when
 * the physical address is aligned by a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to use a smaller page toward 4KB
 * when a mapping range is covered by non-WB type of MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_desc io_desc;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}
	__ioremap_check_mem(phys_addr, size, &io_desc);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Mask out any bits not part of the actual physical
	 * address, like memory encryption bits.
	 */
	phys_addr &= PHYSICAL_PAGE_MASK;
	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}
	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 * In TDX guests, memory is marked private by default. If encryption
	 * is not requested (using encrypted), explicitly set the decrypt
	 * attribute on all IOREMAPPED memory.
	 */
	prot = PAGE_KERNEL_IO;
	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
		prot = pgprot_encrypted(prot);
	else
		prot = pgprot_decrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}
	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (memtype_kernel_map_sync(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	memtype_free(phys_addr, phys_addr + size);
	return NULL;
}
/**
 * ioremap - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap);
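/*
 * A minimal usage sketch (illustration only, not part of the original
 * file): a hypothetical driver mapping a 4K MMIO window. The physical
 * address and register offsets are made up for the example.
 *
 *	void __iomem *regs = ioremap(0xfed40000, 0x1000);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x04);	// start the (made-up) device
 *	status = readl(regs + 0x08);	// read its (made-up) status register
 *	iounmap(regs);
 */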
/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);
/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);
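/*
 * A minimal sketch of where write combining helps (illustration only, not
 * part of the original file): streaming a large buffer to a hypothetical
 * framebuffer aperture, where strictly ordered uncached writes would be
 * much slower. "fb_bar_phys" and "fb_size" are made-up names.
 *
 *	void __iomem *fb = ioremap_wc(fb_bar_phys, fb_size);
 *
 *	if (fb) {
 *		memset_io(fb, 0, fb_size);
 *		iounmap(fb);
 *	}
 */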
/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
					__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);
void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);
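/*
 * A minimal usage sketch (illustration only, not part of the original
 * file): the cache mode is derived from the protection bits passed in, so
 * handing in the plain PAGE_KERNEL value yields a write-back mapping,
 * much like ioremap_cache(). "paddr" and "len" are made-up names.
 *
 *	void __iomem *p = ioremap_prot(paddr, len, pgprot_val(PAGE_KERNEL));
 */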
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if (WARN_ON_ONCE(!is_ioremap_addr((void __force *)addr)))
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);
	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	kmsan_iounmap_page_range((unsigned long)addr,
		(unsigned long)addr + get_vm_area_size(p));
	memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}
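/*
 * A minimal usage sketch (illustration only, not part of the original
 * file): reading one byte of physical memory the way a /dev/mem style
 * driver would. "phys" is a made-up example address.
 *
 *	void *ptr = xlate_dev_mem_ptr(phys);
 *	u8 val;
 *
 *	if (ptr) {
 *		val = *(u8 *)ptr;
 *		unxlate_dev_mem_ptr(phys, ptr);
 *	}
 */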
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted. If the memory is not part of the
 * kernel usable area it was accessed and created while decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			break;
		fallthrough;

	case E820_TYPE_PRAM:
		return true;

	default:
		break;
	}

	return false;
}
/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}
/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool __ref __memremap_is_setup_data(resource_size_t phys_addr, bool early)
{
	unsigned int setup_data_sz = sizeof(struct setup_data);
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len, size;

		if (phys_addr == paddr)
			return true;

		if (early)
			data = early_memremap_decrypted(paddr, setup_data_sz);
		else
			data = memremap(paddr, setup_data_sz, MEMREMAP_WB | MEMREMAP_DEC);
		if (!data) {
			pr_warn("failed to remap setup_data entry\n");
			return false;
		}

		size = setup_data_sz;

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) &&
		    (phys_addr < (paddr + setup_data_sz + len))) {
			if (early)
				early_memunmap(data, setup_data_sz);
			else
				memunmap(data);
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			size += len;
			if (early) {
				early_memunmap(data, setup_data_sz);
				data = early_memremap_decrypted(paddr, size);
			} else {
				memunmap(data);
				data = memremap(paddr, size, MEMREMAP_WB | MEMREMAP_DEC);
			}
			if (!data) {
				pr_warn("failed to remap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		if (early)
			early_memunmap(data, size);
		else
			memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}
static bool memremap_is_setup_data(resource_size_t phys_addr)
{
	return __memremap_is_setup_data(phys_addr, false);
}

static bool __init early_memremap_is_setup_data(resource_size_t phys_addr)
{
	return __memremap_is_setup_data(phys_addr, true);
}
/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (memremap_is_setup_data(phys_addr) ||
		    memremap_is_efi_data(phys_addr))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}
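/*
 * A minimal sketch of how the flags above are used (illustration only, not
 * part of the original file): a caller that must see the data unencrypted
 * passes MEMREMAP_DEC, which makes this hook return false and keeps
 * memremap() off the encrypted direct mapping. "paddr" is a made-up
 * example address.
 *
 *	void *p = memremap(paddr, PAGE_SIZE, MEMREMAP_WB | MEMREMAP_DEC);
 *
 *	if (p)
 *		memunmap(p);
 */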
/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return prot;

	encrypted_prot = true;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (early_memremap_is_setup_data(phys_addr) ||
		    memremap_is_efi_data(phys_addr))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}
bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}
/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory with encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory without encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
#endif	/* CONFIG_AMD_MEM_ENCRYPT */
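/*
 * A minimal usage sketch (illustration only, not part of the original
 * file): early boot code reading a firmware structure that lives
 * decrypted in a SEV guest. "fw_phys" and the 64-byte size are made up
 * for the example.
 *
 *	void *p = early_memremap_decrypted(fw_phys, 64);
 *
 *	if (p) {
 *		// ... read the structure through p ...
 *		early_memunmap(p, 64);
 *	}
 */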
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}
void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __supported_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	flush_tlb_one_kernel(addr);
}