// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"
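
/*
 * Clear the requested bits in the cached copy (cr_alignment) of the
 * CP15 control register and return the updated value.
 */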
#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif
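
/*
 * Compute the minimum, maximum lowmem and maximum highmem page frame
 * numbers from memblock's view of memory.
 */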
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif
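
/*
 * Record the machine's DMA zone size and derive arm_dma_limit and
 * arm_dma_pfn_limit from it; arm_memblock_init() later passes
 * arm_dma_limit to dma_contiguous_reserve().
 */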
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
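
/*
 * Size the zones and their holes for the single node, then hand the
 * result to free_area_init_node().
 */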
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}
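
/*
 * A PFN is valid only if it survives a round trip through
 * __pfn_to_phys() and lies in memory that memblock has mapped.
 */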
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = __pfn_to_phys(pfn);

	if (__phys_to_pfn(addr) != pfn)
		return 0;

	return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

static bool arm_memblock_steal_permitted = true;
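
/*
 * Allocate memory from memblock and then remove it from the kernel's
 * view of memory entirely.  Only permitted until arm_memblock_init()
 * completes, which is what the BUG_ON() enforces.
 */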
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_phys_alloc(size, align);
	if (!phys)
		panic("Failed to steal %pa bytes at %pS\n",
		      &size, (void *)_RET_IP_);

	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}
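
/*
 * Reserve the pages covering the physical initrd supplied by the
 * bootloader and publish it via initrd_start/initrd_end, disabling the
 * initrd if its region is bogus or already in use.
 */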
static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}

#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
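/*
 * Read this CPU's I-cache line size from the cache type register and,
 * on a mismatch with the boot CPU, report it and keep the smallest
 * value in icache_size so cache maintenance stays safe everywhere.
 */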
void check_cpu_icache_size(int cpuid)
{
	u32 size, ctr;

	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

	size = 1 << ((ctr & 0xf) + 2);
	if (cpuid != 0 && icache_size != size)
		pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
			cpuid);
	if (icache_size > size)
		icache_size = size;
}
#endif

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}
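
/*
 * Called once the memblock reservations above are final: determine the
 * PFN limits, run the early memory test and bring up sparsemem and the
 * zone lists.
 */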
void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	memblocks_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif
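
/*
 * Return every highmem page that memblock has not reserved (and that is
 * not NOMAP) to the page allocator.
 */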
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
	swiotlb_init(1);
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make pages tables, etc before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start	= (unsigned long)__start_rodata_section_aligned,
		.end	= (unsigned long)__init_begin,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start	= (unsigned long)_stext,
		.end	= (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask	= ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear	= PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. Is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}
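
/*
 * Apply (set) or revert (clear) the protection bits of each entry in
 * perms[] across its address range, one section at a time, in the given
 * mm.  Misaligned entries are reported and skipped.
 */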
void set_section_perms(struct section_perm *perms, int n, bool set,
			struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				set ? perms[i].prot : perms[i].clear, mm);
	}
}

/*
 * update_sections_early intended to be called only through stop_machine
 * framework and executed by only one CPU while all other CPUs will spin and
 * wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			if (s->mm)
				set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}
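
/* stop_machine() callbacks: run on one CPU while all others spin. */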
static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
				current->active_mm);
}

void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
				current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
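/*
 * Widen the region to the page boundaries reserved by arm_initrd_init(),
 * poison the initrd pages and release them to the page allocator.
 */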
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start == initrd_start)
		start = round_down(start, PAGE_SIZE);
	if (end == initrd_end)
		end = round_up(end, PAGE_SIZE);

	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif