Git Repo - linux.git/blobdiff - mm/mm_init.c
mm/mm_init.c: introduce reset_memoryless_node_totalpages()
[linux.git] / mm / mm_init.c
index 7f7f9c6778546b1ce3618a73621f7b27150c42a7..6f7da396b67b5a22c2508d64b6e419c525bc46c9 100644 (file)
@@ -259,6 +259,8 @@ static int __init cmdline_parse_core(char *p, unsigned long *core,
        return 0;
 }
 
+/*
+ * NOTE(review): presumably set when "kernelcore=mirror" is given on the
+ * command line (the surrounding hunk is in cmdline_parse_core()) — confirm
+ * against the full file.  __initdata_memblock: discarded after early boot.
+ */
+bool mirrored_kernelcore __initdata_memblock;
+
 /*
  * kernelcore=size sets the amount of memory for use for allocations that
  * cannot be reclaimed or migrated.
@@ -1174,10 +1176,6 @@ static unsigned long __init zone_absent_pages_in_node(int nid,
        unsigned long zone_start_pfn, zone_end_pfn;
        unsigned long nr_absent;
 
-       /* When hotadd a new node from cpu_up(), the node should be empty */
-       if (!node_start_pfn && !node_end_pfn)
-               return 0;
-
        zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
        zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
 
@@ -1227,9 +1225,6 @@ static unsigned long __init zone_spanned_pages_in_node(int nid,
 {
        unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
        unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
-       /* When hotadd a new node from cpu_up(), the node should be empty */
-       if (!node_start_pfn && !node_end_pfn)
-               return 0;
 
        /* Get the start and end of the zone */
        *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
@@ -1250,6 +1245,24 @@ static unsigned long __init zone_spanned_pages_in_node(int nid,
        return *zone_end_pfn - *zone_start_pfn;
 }
 
+/*
+ * Zero the page accounting of every zone in @pgdat and of the node itself.
+ * Used for memoryless nodes so no stale span/present counts linger in the
+ * pglist_data (replaces the old "!node_start_pfn && !node_end_pfn" early
+ * returns in the zone_*_pages_in_node() helpers removed above).
+ */
+static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat)
+{
+       struct zone *z;
+
+       /* Clear per-zone start pfn and page counters for all zones. */
+       for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {
+               z->zone_start_pfn = 0;
+               z->spanned_pages = 0;
+               z->present_pages = 0;
+#if defined(CONFIG_MEMORY_HOTPLUG)
+               /* present_early_pages only exists with MEMORY_HOTPLUG. */
+               z->present_early_pages = 0;
+#endif
+       }
+
+       /* Node-wide totals mirror the per-zone state: nothing present. */
+       pgdat->node_spanned_pages = 0;
+       pgdat->node_present_pages = 0;
+       pr_debug("On node %d totalpages: 0\n", pgdat->node_id);
+}
+
 static void __init calculate_node_totalpages(struct pglist_data *pgdat,
                                                unsigned long node_start_pfn,
                                                unsigned long node_end_pfn)
@@ -1702,11 +1715,13 @@ static void __init free_area_init_node(int nid)
                pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
                        (u64)start_pfn << PAGE_SHIFT,
                        end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
+
+               calculate_node_totalpages(pgdat, start_pfn, end_pfn);
        } else {
                pr_info("Initmem setup node %d as memoryless\n", nid);
-       }
 
-       calculate_node_totalpages(pgdat, start_pfn, end_pfn);
+               reset_memoryless_node_totalpages(pgdat);
+       }
 
        alloc_node_mem_map(pgdat);
        pgdat_set_deferred_range(pgdat);
@@ -2328,6 +2343,28 @@ void __init init_cma_reserved_pageblock(struct page *page)
 }
 #endif
 
+/*
+ * Check whether @zone is physically contiguous (no pfn holes) by walking
+ * it one pageblock at a time via __pageblock_pfn_to_page().  On the first
+ * hole the function returns early, leaving zone->contiguous unset;
+ * otherwise zone->contiguous is set to true.
+ */
+void set_zone_contiguous(struct zone *zone)
+{
+       unsigned long block_start_pfn = zone->zone_start_pfn;
+       unsigned long block_end_pfn;
+
+       block_end_pfn = pageblock_end_pfn(block_start_pfn);
+       for (; block_start_pfn < zone_end_pfn(zone);
+                       block_start_pfn = block_end_pfn,
+                        block_end_pfn += pageblock_nr_pages) {
+
+               /* Clamp the last block to the zone end. */
+               block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
+
+               /* NULL means a hole inside this pageblock: not contiguous. */
+               if (!__pageblock_pfn_to_page(block_start_pfn,
+                                            block_end_pfn, zone))
+                       return;
+               /* Zones can be large; yield the CPU between blocks. */
+               cond_resched();
+       }
+
+       /* We confirm that there is no hole */
+       zone->contiguous = true;
+}
+
 void __init page_alloc_init_late(void)
 {
        struct zone *zone;
@@ -2368,6 +2405,8 @@ void __init page_alloc_init_late(void)
        /* Initialize page ext after all struct pages are initialized. */
        if (deferred_struct_pages)
                page_ext_init();
+
+       page_alloc_sysctl_init();
 }
 
 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
@@ -2541,6 +2580,12 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
        __free_pages_core(page, order);
 }
 
+/*
+ * Static-key switches for the init_on_alloc/init_on_free hardening
+ * options; default state follows the corresponding Kconfig symbols.
+ * Exported so modules' allocation fast paths can test the keys.
+ */
+DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
+EXPORT_SYMBOL(init_on_alloc);
+
+DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
+EXPORT_SYMBOL(init_on_free);
+
 static bool _init_on_alloc_enabled_early __read_mostly
                                = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
 static int __init early_init_on_alloc(char *buf)
This page took 0.0345 seconds and 4 git commands to generate.