From: Linus Torvalds
Date: Wed, 16 Dec 2020 22:44:53 +0000 (-0800)
Subject: Merge tag 'memblock-v5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt...
X-Git-Tag: v5.11-rc1~103
X-Git-Url: https://repo.jachan.dev/linux.git/commitdiff_plain/fff875a18382f1983b4a27be9282e697dbccb3db?hp=-c

Merge tag 'memblock-v5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock

Pull memblock updates from Mike Rapoport:
 "memblock debug enhancements.

  Improve tracking of early memory allocations when memblock debug is
  enabled:

   - Add memblock_dbg() to memblock_phys_alloc_range() to get details
     about its usage

   - Make memblock allocator wrappers actually inline to track their
     callers in memblock debug messages"

* tag 'memblock-v5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock:
  mm: memblock: drop __init from memblock functions to make it inline
  mm: memblock: add more debug logs
---

fff875a18382f1983b4a27be9282e697dbccb3db
diff --combined mm/memblock.c
index a3c406070f4d,db7a4f527abc..d24bcfa88d2f
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@@ -871,7 -871,7 +871,7 @@@ int __init_memblock memblock_physmem_ad
   * @base: base address of the region
   * @size: size of the region
   * @set: set or clear the flag
 - * @flag: the flag to udpate
 + * @flag: the flag to update
   *
   * This function isolates region [@base, @base + @size), and sets/clears flag
   *
@@@ -1419,6 -1419,9 +1419,9 @@@ phys_addr_t __init memblock_phys_alloc_
  					     phys_addr_t start,
  					     phys_addr_t end)
  {
+ 	memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
+ 		     __func__, (u64)size, (u64)align, &start, &end,
+ 		     (void *)_RET_IP_);
  	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
  					false);
  }
@@@ -1926,85 -1929,6 +1929,85 @@@ static int __init early_memblock(char *
  }
  early_param("memblock", early_memblock);
  
 +static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 +{
 +	struct page *start_pg, *end_pg;
 +	phys_addr_t pg, pgend;
 +
 +	/*
 +	 * Convert start_pfn/end_pfn to a struct page pointer.
 +	 */
 +	start_pg = pfn_to_page(start_pfn - 1) + 1;
 +	end_pg = pfn_to_page(end_pfn - 1) + 1;
 +
 +	/*
 +	 * Convert to physical addresses, and round start upwards and end
 +	 * downwards.
 +	 */
 +	pg = PAGE_ALIGN(__pa(start_pg));
 +	pgend = __pa(end_pg) & PAGE_MASK;
 +
 +	/*
 +	 * If there are free pages between these, free the section of the
 +	 * memmap array.
 +	 */
 +	if (pg < pgend)
 +		memblock_free(pg, pgend - pg);
 +}
 +
 +/*
 + * The mem_map array can get very big. Free the unused area of the memory map.
 + */
 +static void __init free_unused_memmap(void)
 +{
 +	unsigned long start, end, prev_end = 0;
 +	int i;
 +
 +	if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
 +	    IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
 +		return;
 +
 +	/*
 +	 * This relies on each bank being in address order.
 +	 * The banks are sorted previously in bootmem_init().
 +	 */
 +	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
 +#ifdef CONFIG_SPARSEMEM
 +		/*
 +		 * Take care not to free memmap entries that don't exist
 +		 * due to SPARSEMEM sections which aren't present.
 +		 */
 +		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
 +#else
 +		/*
 +		 * Align down here since the VM subsystem insists that the
 +		 * memmap entries are valid from the bank start aligned to
 +		 * MAX_ORDER_NR_PAGES.
 +		 */
 +		start = round_down(start, MAX_ORDER_NR_PAGES);
 +#endif
 +
 +		/*
 +		 * If we had a previous bank, and there is a space
 +		 * between the current bank and the previous, free it.
 +		 */
 +		if (prev_end && prev_end < start)
 +			free_memmap(prev_end, start);
 +
 +		/*
 +		 * Align up here since the VM subsystem insists that the
 +		 * memmap entries are valid from the bank end aligned to
 +		 * MAX_ORDER_NR_PAGES.
 +		 */
 +		prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
 +	}
 +
 +#ifdef CONFIG_SPARSEMEM
 +	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
 +		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
 +#endif
 +}
 +
  static void __init __free_pages_memory(unsigned long start, unsigned long end)
  {
  	int order;
@@@ -2091,7 -2015,6 +2094,7 @@@ unsigned long __init memblock_free_all(
  {
  	unsigned long pages;
  
 +	free_unused_memmap();
  	reset_all_zones_managed_pages();
  
  	pages = free_low_memory_core_early();
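
A note on the second bullet of the pull message: memblock_dbg() prints
_RET_IP_ with %pS, and a return address only names the interesting caller
when the wrapper the user actually invoked does not add its own stack frame,
which is why the allocator wrappers have to be genuinely inline. A minimal
userspace sketch of that mechanism, assuming GCC/Clang builtins; the function
names are made-up stand-ins, not kernel API:

  #include <stdio.h>
  #include <stddef.h>

  /* out of line, like memblock_alloc_try_nid(): records who called it */
  static __attribute__((noinline)) void *alloc_try_nid(size_t size)
  {
  	/* stand-in for memblock_dbg(... "%pS" ..., (void *)_RET_IP_) */
  	printf("alloc %zu bytes, caller %p\n", size,
  	       __builtin_return_address(0));
  	return NULL;
  }

  /*
   * Like the memblock wrappers after this merge: forced inline, so the
   * call to alloc_try_nid() is emitted inside the *user's* function and
   * the captured return address points at the user, not the wrapper.
   */
  static inline __attribute__((always_inline)) void *alloc_wrapper(size_t size)
  {
  	return alloc_try_nid(size);
  }

  int main(void)
  {
  	alloc_wrapper(64);	/* reported caller is inside main() */
  	return 0;
  }

If alloc_wrapper() were an ordinary out-of-line function, every allocation
would be reported with the same address inside the wrapper, which is the
problem the "drop __init ... to make it inline" patch in this tag addresses.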
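The hole detection in free_unused_memmap() above is pure pfn arithmetic:
banks are walked in address order, each bank's start is rounded down to
MAX_ORDER_NR_PAGES (or clamped to a section boundary under SPARSEMEM), the
previous bank's end is rounded up, and the memmap covering any gap between
the two is freed. A small userspace sketch of just that computation; the bank
ranges and the MAX_ORDER_NR_PAGES value are made-up inputs, and ALIGN_UP/
ALIGN_DOWN are local re-implementations of the kernel's ALIGN()/round_down():

  #include <stdio.h>

  /* made-up pageblock size: 1024 pages (4 MiB with 4 KiB pages) */
  #define MAX_ORDER_NR_PAGES	1024UL
  #define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
  #define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

  int main(void)
  {
  	/* hypothetical memory banks as [start_pfn, end_pfn) pairs */
  	unsigned long banks[][2] = { { 0x1000, 0x7f80 }, { 0x9500, 0x10000 } };
  	unsigned long prev_end = 0;

  	for (unsigned int i = 0; i < 2; i++) {
  		/* the !SPARSEMEM branch: round the bank start down */
  		unsigned long start = ALIGN_DOWN(banks[i][0], MAX_ORDER_NR_PAGES);

  		/* a hole between the rounded banks: its memmap can go */
  		if (prev_end && prev_end < start)
  			printf("would free memmap for pfns [%#lx, %#lx)\n",
  			       prev_end, start);

  		/* ... and round the bank end up for the next pass */
  		prev_end = ALIGN_UP(banks[i][1], MAX_ORDER_NR_PAGES);
  	}
  	return 0;
  }

With these inputs the sketch reports the gap [0x8000, 0x9400). In the kernel,
free_memmap() then converts such a pfn gap into the physical range of the
corresponding struct page entries, rounding inward to whole pages, before
handing it to memblock_free().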