// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include <mm/mmu_decl.h>
#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE		0
#endif
unsigned long long memory_limit;
bool init_mem_is_free;
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

/* Walk the kernel page tables down to the PTE that maps @vaddr. */
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif
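/*
 * Choose protections for a userspace mapping of physical memory
 * (e.g. /dev/mem): defer to the platform hook if one is registered,
 * otherwise map anything that isn't RAM as uncached.
 */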
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}
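/*
 * arch_add_memory() wires newly hot-plugged RAM into the linear
 * mapping before handing the page range to the core MM.
 */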
int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_restrictions *restrictions)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	flush_inval_dcache_range(start, start + size);

	return __add_pages(nid, start_pfn, nr_pages, restrictions);
}
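/*
 * The reverse path: release the pages from the core MM, then tear
 * down the linear mapping for the removed range.
 */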
#ifdef CONFIG_MEMORY_HOTREMOVE
void __ref arch_remove_memory(int nid, u64 start, u64 size,
			      struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page;
	int ret;

	/*
	 * If we have an altmap then we need to skip over any reserved PFNs
	 * when querying the zone.
	 */
	page = pfn_to_page(start_pfn);
	if (altmap)
		page += vmem_altmap_offset(altmap);

	__remove_pages(page_zone(page), start_pfn, nr_pages, altmap);

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_inval_dcache_range(start, start + size);
	ret = remove_section_mapping(start, start + size);
	WARN_ON_ONCE(ret);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
		pr_warn("Hash collision while resizing HPT\n");
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */
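/* Flat-memory (non-NUMA) topology: all of RAM lives in a single node 0. */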
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif
/*
 * We setup ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit ARCH_ZONE_DMA_BITS to
 * inform the generic DMA mapping code. 32-bit only devices (if not handled
 * by an IOMMU anyway) will take a first dip into ZONE_NORMAL and get
 * otherwise served by ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */
#endif
#ifdef CONFIG_HIGHMEM
	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */
	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
				      ((1UL << ARCH_ZONE_DMA_BITS) - 1) >> PAGE_SHIFT);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	memblock_free_all();
#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);

			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */
#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
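	/* Log memory statistics and, on 32-bit, the kernel virtual layout. */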
	mem_init_print_info(NULL);

#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean. We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
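/*
 * Write the page's dcache back and invalidate its icache footprint,
 * taking care to kmap first when the page may live in highmem.
 */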
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);

		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);
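/* Zero a page about to be mapped into userspace and flush its dcache state. */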
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}
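/*
 * Flush the icache for a byte range inside a (possibly highmem) user
 * page, e.g. after the kernel has written instructions into it.
 */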
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_BOOK3S
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long trap;
	bool is_exec;

	if (radix_enabled()) {
		prefetch((void *)address);
		return;
	}

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */
	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
	switch (trap) {
	case 0x300:	/* data access fault */
		is_exec = false;
		break;
	case 0x400:	/* instruction access fault */
		is_exec = true;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, is_exec, trap);
#endif /* CONFIG_PPC_BOOK3S */
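	/* On embedded Book3E, preload the TLB for hugepage mappings instead. */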
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}
/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);
#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */
/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);