// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <[email protected]>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include <linux/memblock.h>
#include <linux/page-isolation.h>
#include <linux/padata.h>
#include <linux/nmi.h>
#include <linux/buffer_head.h>
#include <linux/kmemleak.h>
#include <linux/kfence.h>
#include <linux/page_ext.h>
#include <linux/pti.h>
#include <linux/pgtable.h>
#include <linux/swap.h>
#include <linux/cma.h>
#include "internal.h"
#include "slab.h"
#include "shuffle.h"

#include <asm/setup.h>

#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;

/* The zonelists are simply reported, validation is manual. */
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUILD_BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid)
				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
			pr_cont("\n");
		}
	}
}

void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		KASAN_TAG_WIDTH,
		LRU_GEN_WIDTH,
		LRU_REFS_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT,
		KASAN_TAG_WIDTH);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT,
		(unsigned long)KASAN_TAG_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}

static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;

void mm_compute_batch(int overcommit_policy)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);
	unsigned long ram_pages = totalram_pages();

	/*
	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
	 * (total memory/#cpus), and lift it to 25% for other policies
	 * to ease the possible lock contention for percpu_counter
	 * vm_committed_as, while the max limit is INT_MAX
	 */
	if (overcommit_policy == OVERCOMMIT_NEVER)
		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
	else
		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}
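
/*
 * Worked example (illustrative figures, assuming 4KiB pages): a box with
 * 16 present CPUs and 64GiB of RAM has 16,777,216 pages, so
 * OVERCOMMIT_NEVER yields memsized_batch = 16777216/16/256 = 4096 while
 * the other policies yield 16777216/16/4 = 262144; both exceed the
 * CPU-based floor max_t(s32, 16*2, 32) = 32, so the memory-sized value
 * wins.
 */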

static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch(sysctl_overcommit_memory);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch(sysctl_overcommit_memory);
	hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI);
	return 0;
}

__initcall(mm_compute_batch_init);

#endif

static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);

static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;

static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;

static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
static unsigned long dma_reserve __initdata;

static bool deferred_struct_pages __meminitdata;

static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);

static int __init cmdline_parse_core(char *p, unsigned long *core,
				     unsigned long *percent)
{
	unsigned long long coremem;
	char *endptr;

	if (!p)
		return -EINVAL;

	/* Value may be a percentage of total memory, otherwise bytes */
	coremem = simple_strtoull(p, &endptr, 0);
	if (*endptr == '%') {
		/* Paranoid check for percent values greater than 100 */
		WARN_ON(coremem > 100);

		*percent = coremem;
	} else {
		coremem = memparse(p, &p);
		/* Paranoid check that UL is enough for the coremem value */
		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);

		*core = coremem >> PAGE_SHIFT;
		*percent = 0UL;
	}
	return 0;
}
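
/*
 * Example (hypothetical command lines): "kernelcore=512M" takes the
 * memparse() branch, so *core = SZ_512M >> PAGE_SHIFT (131072 pages with
 * 4KiB pages) and *percent = 0, while "kernelcore=30%" takes the '%'
 * branch and only sets *percent = 30.
 */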

bool mirrored_kernelcore __initdata_memblock;

/*
 * kernelcore=size sets the amount of memory for use for allocations that
 * cannot be reclaimed or migrated.
 */
static int __init cmdline_parse_kernelcore(char *p)
{
	/* parse kernelcore=mirror */
	if (parse_option_str(p, "mirror")) {
		mirrored_kernelcore = true;
		return 0;
	}

	return cmdline_parse_core(p, &required_kernelcore,
				  &required_kernelcore_percent);
}
early_param("kernelcore", cmdline_parse_kernelcore);

/*
 * movablecore=size sets the amount of memory for use for allocations that
 * can be reclaimed or migrated.
 */
static int __init cmdline_parse_movablecore(char *p)
{
	return cmdline_parse_core(p, &required_movablecore,
				  &required_movablecore_percent);
}
early_param("movablecore", cmdline_parse_movablecore);

/*
 * early_calculate_totalpages()
 * Sum pages in active regions for movable zone.
 * Populate N_MEMORY for calculating usable_nodes.
 */
static unsigned long __init early_calculate_totalpages(void)
{
	unsigned long totalpages = 0;
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		unsigned long pages = end_pfn - start_pfn;

		totalpages += pages;
		if (pages)
			node_set_state(nid, N_MEMORY);
	}
	return totalpages;
}
/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in
 * monotonically increasing memory addresses, so that the "highest"
 * populated zone is used.
 */
static void __init find_usable_zone_for_movable(void)
{
	int zone_index;
	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
		if (zone_index == ZONE_MOVABLE)
			continue;

		if (arch_zone_highest_possible_pfn[zone_index] >
				arch_zone_lowest_possible_pfn[zone_index])
			break;
	}

	VM_BUG_ON(zone_index == -1);
	movable_zone = zone_index;
}
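
/*
 * On a typical 64-bit configuration without highmem this picks ZONE_NORMAL
 * (or a DMA zone if nothing higher spans any pfns), i.e. the highest zone
 * that actually contains memory.
 */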

/*
 * Find the PFN the Movable zone begins in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others.
 */
static void __init find_zone_movable_pfns_for_nodes(void)
{
	int i, nid;
	unsigned long usable_startpfn;
	unsigned long kernelcore_node, kernelcore_remaining;
	/* Save the state before borrowing the nodemask. */
	nodemask_t saved_node_state = node_states[N_MEMORY];
	unsigned long totalpages = early_calculate_totalpages();
	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
	struct memblock_region *r;

	/* Need to find movable_zone earlier when movable_node is specified. */
	find_usable_zone_for_movable();

	/*
	 * If movable_node is specified, ignore kernelcore and movablecore
	 * options.
	 */
	if (movable_node_is_enabled()) {
		for_each_mem_region(r) {
			if (!memblock_is_hotpluggable(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = PFN_DOWN(r->base);
			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		goto out2;
	}

	/*
	 * If kernelcore=mirror is specified, ignore movablecore option
	 */
	if (mirrored_kernelcore) {
		bool mem_below_4gb_not_mirrored = false;

		for_each_mem_region(r) {
			if (memblock_is_mirror(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = memblock_region_memory_base_pfn(r);

			if (usable_startpfn < PHYS_PFN(SZ_4G)) {
				mem_below_4gb_not_mirrored = true;
				continue;
			}

			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		if (mem_below_4gb_not_mirrored)
			pr_warn("This configuration results in unmirrored kernel memory.\n");

		goto out2;
	}

	/*
	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
	 * amount of necessary memory.
	 */
	if (required_kernelcore_percent)
		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
				       10000UL;
	if (required_movablecore_percent)
		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
					10000UL;

	/*
	 * If movablecore= was specified, calculate what size of
	 * kernelcore that corresponds so that memory usable for
	 * any allocation type is evenly spread. If both kernelcore
	 * and movablecore are specified, then the value of kernelcore
	 * will be used for required_kernelcore if it's greater than
	 * what movablecore would have allowed.
	 */
	if (required_movablecore) {
		unsigned long corepages;

		/*
		 * Round-up so that ZONE_MOVABLE is at least as large as what
		 * was requested by the user
		 */
		required_movablecore =
			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
		required_movablecore = min(totalpages, required_movablecore);
		corepages = totalpages - required_movablecore;

		required_kernelcore = max(required_kernelcore, corepages);
	}

	/*
	 * If kernelcore was not specified or kernelcore size is larger
	 * than totalpages, there is no ZONE_MOVABLE.
	 */
	if (!required_kernelcore || required_kernelcore >= totalpages)
		goto out;

	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];

restart:
	/* Spread kernelcore memory as evenly as possible throughout nodes */
	kernelcore_node = required_kernelcore / usable_nodes;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		/*
		 * Recalculate kernelcore_node if the division per node
		 * now exceeds what is necessary to satisfy the requested
		 * amount of memory for the kernel
		 */
		if (required_kernelcore < kernelcore_node)
			kernelcore_node = required_kernelcore / usable_nodes;

		/*
		 * As the map is walked, we track how much memory is usable
		 * by the kernel using kernelcore_remaining. When it is
		 * 0, the rest of the node is usable by ZONE_MOVABLE
		 */
		kernelcore_remaining = kernelcore_node;

		/* Go through each range of PFNs within this node */
		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			unsigned long size_pages;

			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
			if (start_pfn >= end_pfn)
				continue;

			/* Account for what is only usable for kernelcore */
			if (start_pfn < usable_startpfn) {
				unsigned long kernel_pages;
				kernel_pages = min(end_pfn, usable_startpfn)
								- start_pfn;

				kernelcore_remaining -= min(kernel_pages,
							kernelcore_remaining);
				required_kernelcore -= min(kernel_pages,
							required_kernelcore);

				/* Continue if range is now fully accounted */
				if (end_pfn <= usable_startpfn) {

					/*
					 * Push zone_movable_pfn to the end so
					 * that if we have to rebalance
					 * kernelcore across nodes, we will
					 * not double account here
					 */
					zone_movable_pfn[nid] = end_pfn;
					continue;
				}
				start_pfn = usable_startpfn;
			}

			/*
			 * The usable PFN range for ZONE_MOVABLE is from
			 * start_pfn->end_pfn. Calculate size_pages as the
			 * number of pages used as kernelcore
			 */
			size_pages = end_pfn - start_pfn;
			if (size_pages > kernelcore_remaining)
				size_pages = kernelcore_remaining;
			zone_movable_pfn[nid] = start_pfn + size_pages;

			/*
			 * Some kernelcore has been met, update counts and
			 * break if the kernelcore for this node has been
			 * satisfied
			 */
			required_kernelcore -= min(required_kernelcore,
								size_pages);
			kernelcore_remaining -= size_pages;
			if (!kernelcore_remaining)
				break;
		}
	}

	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid] further
	 * along on the nodes that still have memory until kernelcore is
	 * satisfied
	 */
	usable_nodes--;
	if (usable_nodes && required_kernelcore > usable_nodes)
		goto restart;

out2:
	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		unsigned long start_pfn, end_pfn;

		zone_movable_pfn[nid] =
			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		if (zone_movable_pfn[nid] >= end_pfn)
			zone_movable_pfn[nid] = 0;
	}

out:
	/* restore the node_state */
	node_states[N_MEMORY] = saved_node_state;
}
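
/*
 * Rough example (hypothetical layout): with "kernelcore=2G" and two nodes
 * of 4G each, each node is asked for ~1G of kernelcore, so
 * zone_movable_pfn[nid] lands roughly 1G into each node (modulo
 * MAX_ORDER_NR_PAGES rounding) and the remaining ~3G per node becomes
 * ZONE_MOVABLE.
 */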

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	mm_zero_struct_page(page);
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);
	page_kasan_tag_reset(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

#ifdef CONFIG_NUMA
/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * treats start/end as pfns.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 */
static int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	unsigned long start_pfn, end_pfn;
	int nid;

	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid != NUMA_NO_NODE) {
		state->last_start = start_pfn;
		state->last_end = end_pfn;
		state->last_nid = nid;
	}

	return nid;
}

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = first_online_node;
	spin_unlock(&early_pfn_lock);

	return nid;
}
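
/*
 * Note: consecutive pfns almost always fall inside the cached region, so
 * the common case is a single range check under early_pfn_lock; only a
 * cache miss pays for the memblock search.
 */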

int hashdist = HASHDIST_DEFAULT;

static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);

static inline void fixup_hashdist(void)
{
	if (num_node_state(N_MEMORY) == 1)
		hashdist = 0;
}
#else
static inline void fixup_hashdist(void) {}
#endif /* CONFIG_NUMA */

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
{
	pgdat->first_deferred_pfn = ULONG_MAX;
}

/* Returns true if the struct page for the pfn is initialised */
static inline bool __meminit early_page_initialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return false;

	return true;
}

/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	static unsigned long prev_end_pfn, nr_initialised;

	if (early_page_ext_enabled())
		return false;
	/*
	 * The static prev_end_pfn holds the end of the previous zone. No
	 * locking is needed because this runs very early in boot, before
	 * smp_init().
	 */
	if (prev_end_pfn != end_pfn) {
		prev_end_pfn = end_pfn;
		nr_initialised = 0;
	}

	/* Always populate low zones for address-constrained allocations */
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
		return true;
	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
	 */
	nr_initialised++;
	if ((nr_initialised > PAGES_PER_SECTION) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		NODE_DATA(nid)->first_deferred_pfn = pfn;
		return true;
	}
	return false;
}
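
/*
 * The pages skipped by defer_init() are initialised later, in parallel,
 * by the deferred-init kthreads kicked off from page_alloc_init_late();
 * first_deferred_pfn records where that later pass must begin.
 */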

static void __meminit init_reserved_page(unsigned long pfn)
{
	pg_data_t *pgdat;
	int nid, zid;

	if (early_page_initialised(pfn))
		return;

	nid = early_pfn_to_nid(pfn);
	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_spans_pfn(zone, pfn))
			break;
	}
	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
}
#else
static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}

static inline bool early_page_initialised(unsigned long pfn)
{
	return true;
}

static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	return false;
}

static inline void init_reserved_page(unsigned long pfn)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn);

			/* Avoid false-positive PageTail() */
			INIT_LIST_HEAD(&page->lru);

			/*
			 * No need for atomic set_bit because the struct
			 * page is not visible yet so nobody should
			 * access it yet.
			 */
			__SetPageReserved(page);
		}
	}
}

/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
static bool __meminit
overlap_memmap_init(unsigned long zone, unsigned long *pfn)
{
	static struct memblock_region *r;

	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
			for_each_mem_region(r) {
				if (*pfn < memblock_region_memory_end_pfn(r))
					break;
			}
		}
		if (*pfn >= memblock_region_memory_base_pfn(r) &&
		    memblock_is_mirror(r)) {
			*pfn = memblock_region_memory_end_pfn(r);
			return true;
		}
	}
	return false;
}
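
/*
 * Note: the static region cursor assumes callers walk pfns in increasing
 * order within a zone (as memmap_init_range() does); a pfn below the
 * cached region simply fails the base/mirror test and is initialised
 * normally.
 */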

/*
 * Only struct pages that correspond to ranges defined by memblock.memory
 * are zeroed and initialized by going through __init_single_page() during
 * memmap_init_zone_range().
 *
 * But, there could be struct pages that correspond to holes in
 * memblock.memory. This can happen because of the following reasons:
 * - physical memory bank size is not necessarily the exact multiple of the
 *   arbitrary section size
 * - early reserved memory may not be listed in memblock.memory
 * - memory layouts defined with memmap= kernel parameter may not align
 *   nicely with memmap sections
 *
 * Explicitly initialize those struct pages so that:
 * - PG_Reserved is set
 * - zone and node links point to zone and node that span the page if the
 *   hole is in the middle of a zone
 * - zone and node links point to adjacent zone/node if the hole falls on
 *   the zone boundary; the pages in such holes will be prepended to the
 *   zone/node above the hole except for the trailing pages in the last
 *   section that will be appended to the zone/node below.
 */
static void __init init_unavailable_range(unsigned long spfn,
					  unsigned long epfn,
					  int zone, int node)
{
	unsigned long pfn;
	u64 pgcnt = 0;

	for (pfn = spfn; pfn < epfn; pfn++) {
		if (!pfn_valid(pageblock_start_pfn(pfn))) {
			pfn = pageblock_end_pfn(pfn) - 1;
			continue;
		}
		__init_single_page(pfn_to_page(pfn), pfn, zone, node);
		__SetPageReserved(pfn_to_page(pfn));
		pgcnt++;
	}

	if (pgcnt)
		pr_info("On node %d, zone %s: %lld pages in unavailable ranges\n",
			node, zone_names[zone], pgcnt);
}

/*
 * Initially all pages are reserved - free ones are freed
 * up by memblock_free_all() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 *
 * All aligned pageblocks are initialized to the specified migratetype
 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
 * zone stats (e.g., nr_isolate_pageblock) are touched.
 */
void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn, unsigned long zone_end_pfn,
		enum meminit_context context,
		struct vmem_altmap *altmap, int migratetype)
{
	unsigned long pfn, end_pfn = start_pfn + size;
	struct page *page;

	if (highest_memmap_pfn < end_pfn - 1)
		highest_memmap_pfn = end_pfn - 1;

#ifdef CONFIG_ZONE_DEVICE
	/*
	 * Honor reservation requested by the driver for this ZONE_DEVICE
	 * memory. We limit the total number of pages to initialize to just
	 * those that might contain the memory mapping. We will defer the
	 * ZONE_DEVICE page initialization until after we have released
	 * the hotplug lock.
	 */
	if (zone == ZONE_DEVICE) {
		if (!altmap)
			return;

		if (start_pfn == altmap->base_pfn)
			start_pfn += altmap->reserve;
		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
	}
#endif

	for (pfn = start_pfn; pfn < end_pfn; ) {
		/*
		 * There can be holes in boot-time mem_map[]s handed to this
		 * function. They do not exist on hotplugged memory.
		 */
		if (context == MEMINIT_EARLY) {
			if (overlap_memmap_init(zone, &pfn))
				continue;
			if (defer_init(nid, pfn, zone_end_pfn)) {
				deferred_struct_pages = true;
				break;
			}
		}

		page = pfn_to_page(pfn);
		__init_single_page(page, pfn, zone, nid);
		if (context == MEMINIT_HOTPLUG)
			__SetPageReserved(page);

		/*
		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
		 * such that unmovable allocations won't be scattered all
		 * over the place during system boot.
		 */
		if (pageblock_aligned(pfn)) {
			set_pageblock_migratetype(page, migratetype);
			cond_resched();
		}
		pfn++;
	}
}

static void __init memmap_init_zone_range(struct zone *zone,
					  unsigned long start_pfn,
					  unsigned long end_pfn,
					  unsigned long *hole_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
	int nid = zone_to_nid(zone), zone_id = zone_idx(zone);

	start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
	end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);

	if (start_pfn >= end_pfn)
		return;

	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);

	if (*hole_pfn < start_pfn)
		init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);

	*hole_pfn = end_pfn;
}

static void __init memmap_init(void)
{
	unsigned long start_pfn, end_pfn;
	unsigned long hole_pfn = 0;
	int i, j, zone_id = 0, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		struct pglist_data *node = NODE_DATA(nid);

		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = node->node_zones + j;

			if (!populated_zone(zone))
				continue;

			memmap_init_zone_range(zone, start_pfn, end_pfn,
					       &hole_pfn);
			zone_id = j;
		}
	}

#ifdef CONFIG_SPARSEMEM
	/*
	 * Initialize the memory map for the hole in the range [memory_end,
	 * section_end].
	 * Append the pages in this hole to the highest zone in the last
	 * node.
	 * The call to init_unavailable_range() is outside the ifdef to
	 * silence the compiler warning about zone_id set but not used;
	 * for FLATMEM it is a nop anyway.
	 */
	end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
	if (hole_pfn < end_pfn)
#endif
		init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
}

#ifdef CONFIG_ZONE_DEVICE
static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
					  unsigned long zone_idx, int nid,
					  struct dev_pagemap *pgmap)
{
	__init_single_page(page, pfn, zone_idx, nid);

	/*
	 * Mark page reserved as it will need to wait for onlining
	 * phase for it to be fully associated with a zone.
	 *
	 * We can use the non-atomic __set_bit operation for setting
	 * the flag as we are still initializing the pages.
	 */
	__SetPageReserved(page);

	/*
	 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
	 * and zone_device_data. It is a bug if a ZONE_DEVICE page is
	 * ever freed or placed on a driver-private list.
	 */
	page->pgmap = pgmap;
	page->zone_device_data = NULL;

	/*
	 * Mark the block movable so that blocks are reserved for
	 * movable at startup. This will force kernel allocations
	 * to reserve their blocks rather than leaking throughout
	 * the address space during boot when many long-lived
	 * kernel allocations are made.
	 *
	 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
	 * because this is done early in section_activate()
	 */
	if (pageblock_aligned(pfn)) {
		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		cond_resched();
	}

	/*
	 * ZONE_DEVICE pages are released directly to the driver page allocator
	 * which will set the page count to 1 when allocating the page.
	 */
	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
	    pgmap->type == MEMORY_DEVICE_COHERENT)
		set_page_count(page, 0);
}

/*
 * With compound page geometry and when struct pages are stored in ram most
 * tail pages are reused. Consequently, the amount of unique struct pages to
 * initialize is a lot smaller than the total amount of struct pages being
 * mapped. This is a paired / mild layering violation with explicit knowledge
 * of how the sparse_vmemmap internals handle compound pages in the absence
 * of an altmap. See vmemmap_populate_compound_pages().
 */
static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
					      struct dev_pagemap *pgmap)
{
	if (!vmemmap_can_optimize(altmap, pgmap))
		return pgmap_vmemmap_nr(pgmap);

	return 2 * (PAGE_SIZE / sizeof(struct page));
}
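
/*
 * Worked example (assuming 4KiB pages and a 64-byte struct page): when the
 * vmemmap can be optimized, only the struct pages backed by the first two
 * vmemmap pages are unique, i.e. 2 * (4096 / 64) = 128 struct pages are
 * initialised per compound page, regardless of its size.
 */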

static void __ref memmap_init_compound(struct page *head,
				       unsigned long head_pfn,
				       unsigned long zone_idx, int nid,
				       struct dev_pagemap *pgmap,
				       unsigned long nr_pages)
{
	unsigned long pfn, end_pfn = head_pfn + nr_pages;
	unsigned int order = pgmap->vmemmap_shift;

	__SetPageHead(head);
	for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
		prep_compound_tail(head, pfn - head_pfn);
		set_page_count(page, 0);

		/*
		 * The first tail page stores important compound page info.
		 * Call prep_compound_head() after the first tail page has
		 * been initialized, to not have the data overwritten.
		 */
		if (pfn == head_pfn + 1)
			prep_compound_head(head, order);
	}
}

void __ref memmap_init_zone_device(struct zone *zone,
				   unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct dev_pagemap *pgmap)
{
	unsigned long pfn, end_pfn = start_pfn + nr_pages;
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
	unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap);
	unsigned long zone_idx = zone_idx(zone);
	unsigned long start = jiffies;
	int nid = pgdat->node_id;

	if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE))
		return;

	/*
	 * The call to memmap_init should have already taken care
	 * of the pages reserved for the memmap, so we can just jump to
	 * the end of that region and start processing the device pages.
	 */
	if (altmap) {
		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
		nr_pages = end_pfn - start_pfn;
	}

	for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
		struct page *page = pfn_to_page(pfn);

		__init_zone_device_page(page, pfn, zone_idx, nid, pgmap);

		if (pfns_per_compound == 1)
			continue;

		memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
				     compound_nr_pages(altmap, pgmap));
	}

	pr_debug("%s initialised %lu pages in %ums\n", __func__,
		 nr_pages, jiffies_to_msecs(jiffies - start));
}
#endif

/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independently of architecture. Unlike the other zones,
 * the starting point for ZONE_MOVABLE is not fixed. It may be different
 * in each node depending on the size of each node and how evenly kernelcore
 * is distributed. This helper function adjusts the zone ranges
 * provided by the architecture for a given node by using the end of the
 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are in order of monotonically increasing memory
 * addresses.
 */
static void __init adjust_zone_range_for_zone_movable(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	/* Only adjust if ZONE_MOVABLE is on this node */
	if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
		if (zone_type == ZONE_MOVABLE) {
			*zone_start_pfn = zone_movable_pfn[nid];
			*zone_end_pfn = min(node_end_pfn,
				arch_zone_highest_possible_pfn[movable_zone]);

		/* Adjust for ZONE_MOVABLE starting within this range */
		} else if (!mirrored_kernelcore &&
			*zone_start_pfn < zone_movable_pfn[nid] &&
			*zone_end_pfn > zone_movable_pfn[nid]) {
			*zone_end_pfn = zone_movable_pfn[nid];

		/* Check if this whole range is within ZONE_MOVABLE */
		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
			*zone_start_pfn = *zone_end_pfn;
	}
}

/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
unsigned long __init __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	unsigned long nr_absent = range_end_pfn - range_start_pfn;
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
		nr_absent -= end_pfn - start_pfn;
	}
	return nr_absent;
}

/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * Return: the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
							unsigned long end_pfn)
{
	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}

/* Return the number of page frames in holes in a zone on a node */
static unsigned long __init zone_absent_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn)
{
	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
	unsigned long zone_start_pfn, zone_end_pfn;
	unsigned long nr_absent;

	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);

	adjust_zone_range_for_zone_movable(nid, zone_type,
			node_start_pfn, node_end_pfn,
			&zone_start_pfn, &zone_end_pfn);
	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);

	/*
	 * ZONE_MOVABLE handling.
	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
	 * and vice versa.
	 */
	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
		unsigned long start_pfn, end_pfn;
		struct memblock_region *r;

		for_each_mem_region(r) {
			start_pfn = clamp(memblock_region_memory_base_pfn(r),
					  zone_start_pfn, zone_end_pfn);
			end_pfn = clamp(memblock_region_memory_end_pfn(r),
					zone_start_pfn, zone_end_pfn);

			if (zone_type == ZONE_MOVABLE &&
			    memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;

			if (zone_type == ZONE_NORMAL &&
			    !memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;
		}
	}

	return nr_absent;
}

/*
 * Return the number of pages a zone spans in a node, including holes
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
static unsigned long __init zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];

	/* Get the start and end of the zone */
	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
	adjust_zone_range_for_zone_movable(nid, zone_type,
				node_start_pfn, node_end_pfn,
				zone_start_pfn, zone_end_pfn);

	/* Check that this node has pages within the zone's required range */
	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
		return 0;

	/* Move the zone boundaries inside the node if necessary */
	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);

	/* Return the spanned pages */
	return *zone_end_pfn - *zone_start_pfn;
}

static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {
		z->zone_start_pfn = 0;
		z->spanned_pages = 0;
		z->present_pages = 0;
#if defined(CONFIG_MEMORY_HOTPLUG)
		z->present_early_pages = 0;
#endif
	}

	pgdat->node_spanned_pages = 0;
	pgdat->node_present_pages = 0;
	pr_debug("On node %d totalpages: 0\n", pgdat->node_id);
}

static void __init calculate_node_totalpages(struct pglist_data *pgdat,
					     unsigned long node_start_pfn,
					     unsigned long node_end_pfn)
{
	unsigned long realtotalpages = 0, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;
		unsigned long zone_start_pfn, zone_end_pfn;
		unsigned long spanned, absent;
		unsigned long size, real_size;

		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
						     node_start_pfn,
						     node_end_pfn,
						     &zone_start_pfn,
						     &zone_end_pfn);
		absent = zone_absent_pages_in_node(pgdat->node_id, i,
						   node_start_pfn,
						   node_end_pfn);

		size = spanned;
		real_size = size - absent;

		if (size)
			zone->zone_start_pfn = zone_start_pfn;
		else
			zone->zone_start_pfn = 0;
		zone->spanned_pages = size;
		zone->present_pages = real_size;
#if defined(CONFIG_MEMORY_HOTPLUG)
		zone->present_early_pages = real_size;
#endif

		totalpages += size;
		realtotalpages += real_size;
	}

	pgdat->node_spanned_pages = totalpages;
	pgdat->node_present_pages = realtotalpages;
	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
}

static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
						unsigned long present_pages)
{
	unsigned long pages = spanned_pages;

	/*
	 * Provide a more accurate estimation if there are holes within
	 * the zone and SPARSEMEM is in use. If there are holes within the
	 * zone, each populated memory region may cost us one or two extra
	 * memmap pages due to alignment because memmap pages for each
	 * populated regions may not be naturally aligned on page boundary.
	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
	 */
	if (spanned_pages > present_pages + (present_pages >> 4) &&
	    IS_ENABLED(CONFIG_SPARSEMEM))
		pages = present_pages;

	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
}
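
/*
 * Rough example (assuming 4KiB pages and a 64-byte struct page): a zone
 * spanning 1,048,576 pages (4GiB) needs 1048576 * 64 bytes of memmap,
 * which is 16384 pages' worth, the value returned here.
 */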

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pgdat_init_split_queue(struct pglist_data *pgdat)
{
	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;

	spin_lock_init(&ds_queue->split_queue_lock);
	INIT_LIST_HEAD(&ds_queue->split_queue);
	ds_queue->split_queue_len = 0;
}
#else
static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
#endif

#ifdef CONFIG_COMPACTION
static void pgdat_init_kcompactd(struct pglist_data *pgdat)
{
	init_waitqueue_head(&pgdat->kcompactd_wait);
}
#else
static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
#endif

static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
{
	int i;

	pgdat_resize_init(pgdat);
	pgdat_kswapd_lock_init(pgdat);

	pgdat_init_split_queue(pgdat);
	pgdat_init_kcompactd(pgdat);

	init_waitqueue_head(&pgdat->kswapd_wait);
	init_waitqueue_head(&pgdat->pfmemalloc_wait);

	for (i = 0; i < NR_VMSCAN_THROTTLE; i++)
		init_waitqueue_head(&pgdat->reclaim_wait[i]);

	pgdat_page_ext_init(pgdat);
	lruvec_init(&pgdat->__lruvec);
}

static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
							unsigned long remaining_pages)
{
	atomic_long_set(&zone->managed_pages, remaining_pages);
	zone_set_nid(zone, nid);
	zone->name = zone_names[idx];
	zone->zone_pgdat = NODE_DATA(nid);
	spin_lock_init(&zone->lock);
	zone_seqlock_init(zone);
	zone_pcp_init(zone);
}

static void __meminit zone_init_free_lists(struct zone *zone)
{
	unsigned int order, t;
	for_each_migratetype_order(order, t) {
		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
		zone->free_area[order].nr_free = 0;
	}
}

void __meminit init_currently_empty_zone(struct zone *zone,
					 unsigned long zone_start_pfn,
					 unsigned long size)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int zone_idx = zone_idx(zone) + 1;

	if (zone_idx > pgdat->nr_zones)
		pgdat->nr_zones = zone_idx;

	zone->zone_start_pfn = zone_start_pfn;

	mminit_dprintk(MMINIT_TRACE, "memmap_init",
			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
			pgdat->node_id,
			(unsigned long)zone_idx(zone),
			zone_start_pfn, (zone_start_pfn + size));

	zone_init_free_lists(zone);
	zone->initialized = 1;
}

#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->blockflags rounded to an unsigned long.
 * Start by making sure zonesize is a multiple of pageblock_order by rounding
 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
 * round what is now in bits to nearest long in bits, then return it in
 * bytes.
 */
static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
{
	unsigned long usemapsize;

	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
	usemapsize = roundup(zonesize, pageblock_nr_pages);
	usemapsize = usemapsize >> pageblock_order;
	usemapsize *= NR_PAGEBLOCK_BITS;
	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));

	return usemapsize / 8;
}
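
/*
 * Worked example (assuming pageblock_order = 9 and NR_PAGEBLOCK_BITS = 4):
 * an aligned zone of 1,048,576 pages covers 2048 pageblocks, needing
 * 2048 * 4 = 8192 bits; that is already a multiple of BITS_PER_LONG, so
 * 8192 / 8 = 1024 bytes are returned.
 */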
1435 | ||
1436 | static void __ref setup_usemap(struct zone *zone) | |
1437 | { | |
1438 | unsigned long usemapsize = usemap_size(zone->zone_start_pfn, | |
1439 | zone->spanned_pages); | |
1440 | zone->pageblock_flags = NULL; | |
1441 | if (usemapsize) { | |
1442 | zone->pageblock_flags = | |
1443 | memblock_alloc_node(usemapsize, SMP_CACHE_BYTES, | |
1444 | zone_to_nid(zone)); | |
1445 | if (!zone->pageblock_flags) | |
1446 | panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", | |
1447 | usemapsize, zone->name, zone_to_nid(zone)); | |
1448 | } | |
1449 | } | |
1450 | #else | |
1451 | static inline void setup_usemap(struct zone *zone) {} | |
1452 | #endif /* CONFIG_SPARSEMEM */ | |
1453 | ||
1454 | #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE | |
1455 | ||
1456 | /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ | |
1457 | void __init set_pageblock_order(void) | |
1458 | { | |
1459 | unsigned int order = MAX_ORDER; | |
1460 | ||
1461 | /* Check that pageblock_nr_pages has not already been setup */ | |
1462 | if (pageblock_order) | |
1463 | return; | |
1464 | ||
1465 | /* Don't let pageblocks exceed the maximum allocation granularity. */ | |
1466 | if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order) | |
1467 | order = HUGETLB_PAGE_ORDER; | |
1468 | ||
1469 | /* | |
1470 | * Assume the largest contiguous order of interest is a huge page. | |
1471 | * This value may be variable depending on boot parameters on IA64 and | |
1472 | * powerpc. | |
1473 | */ | |
1474 | pageblock_order = order; | |
1475 | } | |
1476 | #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ | |
1477 | ||
1478 | /* | |
1479 | * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() | |
1480 | * is unused as pageblock_order is set at compile-time. See | |
1481 | * include/linux/pageblock-flags.h for the values of pageblock_order based on | |
1482 | * the kernel config | |
1483 | */ | |
1484 | void __init set_pageblock_order(void) | |
1485 | { | |
1486 | } | |
1487 | ||
1488 | #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ | |
1489 | ||
1490 | /* | |
1491 | * Set up the zone data structures | |
1492 | * - init pgdat internals | |
1493 | * - init all zones belonging to this node | |
1494 | * | |
1495 | * NOTE: this function is only called during memory hotplug | |
1496 | */ | |
1497 | #ifdef CONFIG_MEMORY_HOTPLUG | |
1498 | void __ref free_area_init_core_hotplug(struct pglist_data *pgdat) | |
1499 | { | |
1500 | int nid = pgdat->node_id; | |
1501 | enum zone_type z; | |
1502 | int cpu; | |
1503 | ||
1504 | pgdat_init_internals(pgdat); | |
1505 | ||
1506 | if (pgdat->per_cpu_nodestats == &boot_nodestats) | |
1507 | pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); | |
1508 | ||
1509 | /* | |
1510 | * Reset the nr_zones, order and highest_zoneidx before reuse. | |
1511 | * Note that kswapd will init kswapd_highest_zoneidx properly | |
1512 | * when it starts in the near future. | |
1513 | */ | |
1514 | pgdat->nr_zones = 0; | |
1515 | pgdat->kswapd_order = 0; | |
1516 | pgdat->kswapd_highest_zoneidx = 0; | |
1517 | pgdat->node_start_pfn = 0; | |
1518 | for_each_online_cpu(cpu) { | |
1519 | struct per_cpu_nodestat *p; | |
1520 | ||
1521 | p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); | |
1522 | memset(p, 0, sizeof(*p)); | |
1523 | } | |
1524 | ||
1525 | for (z = 0; z < MAX_NR_ZONES; z++) | |
1526 | zone_init_internals(&pgdat->node_zones[z], z, nid, 0); | |
1527 | } | |
1528 | #endif | |
1529 | ||
1530 | /* | |
1531 | * Set up the zone data structures: | |
1532 | * - mark all pages reserved | |
1533 | * - mark all memory queues empty | |
1534 | * - clear the memory bitmaps | |
1535 | * | |
1536 | * NOTE: pgdat should get zeroed by caller. | |
1537 | * NOTE: this function is only called during early init. | |
1538 | */ | |
1539 | static void __init free_area_init_core(struct pglist_data *pgdat) | |
1540 | { | |
1541 | enum zone_type j; | |
1542 | int nid = pgdat->node_id; | |
1543 | ||
1544 | pgdat_init_internals(pgdat); | |
1545 | pgdat->per_cpu_nodestats = &boot_nodestats; | |
1546 | ||
1547 | for (j = 0; j < MAX_NR_ZONES; j++) { | |
1548 | struct zone *zone = pgdat->node_zones + j; | |
1549 | unsigned long size, freesize, memmap_pages; | |
1550 | ||
1551 | size = zone->spanned_pages; | |
1552 | freesize = zone->present_pages; | |
1553 | ||
1554 | /* | |
1555 | * Adjust freesize so that it accounts for how much memory | |
1556 | * is used by this zone for memmap. This affects the watermark | |
1557 | * and per-cpu initialisations | |
1558 | */ | |
1559 | memmap_pages = calc_memmap_size(size, freesize); | |
1560 | if (!is_highmem_idx(j)) { | |
1561 | if (freesize >= memmap_pages) { | |
1562 | freesize -= memmap_pages; | |
1563 | if (memmap_pages) | |
1564 | pr_debug(" %s zone: %lu pages used for memmap\n", | |
1565 | zone_names[j], memmap_pages); | |
1566 | } else | |
1567 | pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n", | |
1568 | zone_names[j], memmap_pages, freesize); | |
1569 | } | |
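/*
 * Illustrative numbers (assuming 4KiB pages and a 64-byte struct page):
 * a 1GiB zone spans 262144 pages, whose memmap occupies
 * 262144 * 64 / 4096 = 4096 pages (16MiB), so freesize shrinks by 4096
 * pages before watermarks and per-cpu batches are derived from it.
 */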
1570 | ||
1571 | /* Account for reserved pages */ | |
1572 | if (j == 0 && freesize > dma_reserve) { | |
1573 | freesize -= dma_reserve; | |
1574 | pr_debug(" %s zone: %lu pages reserved\n", zone_names[0], dma_reserve); | |
1575 | } | |
1576 | ||
1577 | if (!is_highmem_idx(j)) | |
1578 | nr_kernel_pages += freesize; | |
1579 | /* Charge for highmem memmap if there are enough kernel pages */ | |
1580 | else if (nr_kernel_pages > memmap_pages * 2) | |
1581 | nr_kernel_pages -= memmap_pages; | |
1582 | nr_all_pages += freesize; | |
1583 | ||
1584 | /* | |
1585 | * Set an approximate value for lowmem here, it will be adjusted | |
1586 | * when the bootmem allocator frees pages into the buddy system. | |
1587 | * And all highmem pages will be managed by the buddy system. | |
1588 | */ | |
1589 | zone_init_internals(zone, j, nid, freesize); | |
1590 | ||
1591 | if (!size) | |
1592 | continue; | |
1593 | ||
1594 | set_pageblock_order(); | |
1595 | setup_usemap(zone); | |
1596 | init_currently_empty_zone(zone, zone->zone_start_pfn, size); | |
1597 | } | |
1598 | } | |
1599 | ||
1600 | void __init *memmap_alloc(phys_addr_t size, phys_addr_t align, | |
1601 | phys_addr_t min_addr, int nid, bool exact_nid) | |
1602 | { | |
1603 | void *ptr; | |
1604 | ||
1605 | if (exact_nid) | |
1606 | ptr = memblock_alloc_exact_nid_raw(size, align, min_addr, | |
1607 | MEMBLOCK_ALLOC_ACCESSIBLE, | |
1608 | nid); | |
1609 | else | |
1610 | ptr = memblock_alloc_try_nid_raw(size, align, min_addr, | |
1611 | MEMBLOCK_ALLOC_ACCESSIBLE, | |
1612 | nid); | |
1613 | ||
1614 | if (ptr && size > 0) | |
1615 | page_init_poison(ptr, size); | |
1616 | ||
1617 | return ptr; | |
1618 | } | |
1619 | ||
1620 | #ifdef CONFIG_FLATMEM | |
1621 | static void __init alloc_node_mem_map(struct pglist_data *pgdat) | |
1622 | { | |
1623 | unsigned long __maybe_unused start = 0; | |
1624 | unsigned long __maybe_unused offset = 0; | |
1625 | ||
1626 | /* Skip empty nodes */ | |
1627 | if (!pgdat->node_spanned_pages) | |
1628 | return; | |
1629 | ||
1630 | start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); | |
1631 | offset = pgdat->node_start_pfn - start; | |
1632 | /* ia64 gets its own node_mem_map, before this, without bootmem */ | |
1633 | if (!pgdat->node_mem_map) { | |
1634 | unsigned long size, end; | |
1635 | struct page *map; | |
1636 | ||
1637 | /* | |
1638 | * The zone's endpoints aren't required to be MAX_ORDER | |
1639 | * aligned but the node_mem_map endpoints must be in order | |
1640 | * for the buddy allocator to function correctly. | |
1641 | */ | |
1642 | end = pgdat_end_pfn(pgdat); | |
1643 | end = ALIGN(end, MAX_ORDER_NR_PAGES); | |
1644 | size = (end - start) * sizeof(struct page); | |
1645 | map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT, | |
1646 | pgdat->node_id, false); | |
1647 | if (!map) | |
1648 | panic("Failed to allocate %ld bytes for node %d memory map\n", | |
1649 | size, pgdat->node_id); | |
1650 | pgdat->node_mem_map = map + offset; | |
1651 | } | |
1652 | pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n", | |
1653 | __func__, pgdat->node_id, (unsigned long)pgdat, | |
1654 | (unsigned long)pgdat->node_mem_map); | |
1655 | #ifndef CONFIG_NUMA | |
1656 | /* | |
1657 | * Without NUMA, the global mem_map is just set as node 0's | 
1658 | */ | |
1659 | if (pgdat == NODE_DATA(0)) { | |
1660 | mem_map = NODE_DATA(0)->node_mem_map; | |
1661 | if (page_to_pfn(mem_map) != pgdat->node_start_pfn) | |
1662 | mem_map -= offset; | |
1663 | } | |
1664 | #endif | |
1665 | } | |
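/*
 * Example (illustrative): with MAX_ORDER_NR_PAGES == 1024, a node whose
 * first pfn is 0x10005 gets start = 0x10000 and offset = 5, so the map
 * is allocated from the aligned boundary and node_mem_map points 5
 * struct pages into it; the end is rounded up the same way so buddy
 * lookups never step outside the allocated map.
 */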
1666 | #else | |
1667 | static inline void alloc_node_mem_map(struct pglist_data *pgdat) { } | |
1668 | #endif /* CONFIG_FLATMEM */ | |
1669 | ||
1670 | /** | |
1671 | * get_pfn_range_for_nid - Return the start and end page frames for a node | |
1672 | * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. | |
1673 | * @start_pfn: Passed by reference. On return, it will have the node start_pfn. | |
1674 | * @end_pfn: Passed by reference. On return, it will have the node end_pfn. | |
1675 | * | |
1676 | * It returns the start and end page frame of a node based on information | |
1677 | * provided by memblock_set_node(). If called for a node | 
1678 | * with no available memory, the start and end | 
1679 | * PFNs will be 0. | 
1680 | */ | |
1681 | void __init get_pfn_range_for_nid(unsigned int nid, | |
1682 | unsigned long *start_pfn, unsigned long *end_pfn) | |
1683 | { | |
1684 | unsigned long this_start_pfn, this_end_pfn; | |
1685 | int i; | |
1686 | ||
1687 | *start_pfn = -1UL; | |
1688 | *end_pfn = 0; | |
1689 | ||
1690 | for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { | |
1691 | *start_pfn = min(*start_pfn, this_start_pfn); | |
1692 | *end_pfn = max(*end_pfn, this_end_pfn); | |
1693 | } | |
1694 | ||
1695 | if (*start_pfn == -1UL) | |
1696 | *start_pfn = 0; | |
1697 | } | |
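/*
 * Example (illustrative): a node with memblock ranges [0x1000, 0x2000)
 * and [0x3000, 0x4000) yields *start_pfn = 0x1000 and *end_pfn = 0x4000;
 * the hole between the ranges is still inside the reported span.
 */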
1698 | ||
1699 | static void __init free_area_init_node(int nid) | |
1700 | { | |
1701 | pg_data_t *pgdat = NODE_DATA(nid); | |
1702 | unsigned long start_pfn = 0; | |
1703 | unsigned long end_pfn = 0; | |
1704 | ||
1705 | /* pg_data_t should be reset to zero when it's allocated */ | |
1706 | WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); | |
1707 | ||
1708 | get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); | |
1709 | ||
1710 | pgdat->node_id = nid; | |
1711 | pgdat->node_start_pfn = start_pfn; | |
1712 | pgdat->per_cpu_nodestats = NULL; | |
1713 | ||
1714 | if (start_pfn != end_pfn) { | |
1715 | pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, | |
1716 | (u64)start_pfn << PAGE_SHIFT, | |
1717 | end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); | |
ba1b67c7 HX |
1718 | |
1719 | calculate_node_totalpages(pgdat, start_pfn, end_pfn); | |
9420f89d MRI |
1720 | } else { |
1721 | pr_info("Initmem setup node %d as memoryless\n", nid); | |
9420f89d | 1722 | |
ba1b67c7 HX |
1723 | reset_memoryless_node_totalpages(pgdat); |
1724 | } | |
9420f89d MRI |
1725 | |
1726 | alloc_node_mem_map(pgdat); | |
1727 | pgdat_set_deferred_range(pgdat); | |
1728 | ||
1729 | free_area_init_core(pgdat); | |
1730 | lru_gen_init_pgdat(pgdat); | |
1731 | } | |
1732 | ||
1733 | /* Any regular or high memory on that node? */ | 
1734 | static void check_for_memory(pg_data_t *pgdat, int nid) | |
1735 | { | |
1736 | enum zone_type zone_type; | |
1737 | ||
1738 | for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { | |
1739 | struct zone *zone = &pgdat->node_zones[zone_type]; | |
1740 | if (populated_zone(zone)) { | |
1741 | if (IS_ENABLED(CONFIG_HIGHMEM)) | |
1742 | node_set_state(nid, N_HIGH_MEMORY); | |
1743 | if (zone_type <= ZONE_NORMAL) | |
1744 | node_set_state(nid, N_NORMAL_MEMORY); | |
1745 | break; | |
1746 | } | |
1747 | } | |
1748 | } | |
1749 | ||
1750 | #if MAX_NUMNODES > 1 | |
1751 | /* | |
1752 | * Figure out the number of possible node ids. | |
1753 | */ | |
1754 | void __init setup_nr_node_ids(void) | |
1755 | { | |
1756 | unsigned int highest; | |
1757 | ||
1758 | highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); | |
1759 | nr_node_ids = highest + 1; | |
1760 | } | |
1761 | #endif | |
1762 | ||
1763 | static void __init free_area_init_memoryless_node(int nid) | |
1764 | { | |
1765 | free_area_init_node(nid); | |
1766 | } | |
1767 | ||
1768 | /* | |
1769 | * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For | 
1770 | * such cases we allow max_zone_pfn to be sorted in descending order. | 
1771 | */ | |
5f300fd5 | 1772 | static bool arch_has_descending_max_zone_pfns(void) |
9420f89d | 1773 | { |
5f300fd5 | 1774 | return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40); |
9420f89d MRI |
1775 | } |
1776 | ||
1777 | /** | |
1778 | * free_area_init - Initialise all pg_data_t and zone data | |
1779 | * @max_zone_pfn: an array of max PFNs for each zone | |
1780 | * | |
1781 | * This will call free_area_init_node() for each active node in the system. | |
1782 | * Using the page ranges provided by memblock_set_node(), the size of each | |
1783 | * zone in each node and their holes is calculated. If the maximum PFNs | 
1784 | * of two adjacent zones match, the higher zone is assumed to be empty. | 
1785 | * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed | |
1786 | * that arch_max_dma32_pfn has no pages. It is also assumed that a zone | |
1787 | * starts where the previous one ended. For example, ZONE_DMA32 starts | |
1788 | * at arch_max_dma_pfn. | |
1789 | */ | |
1790 | void __init free_area_init(unsigned long *max_zone_pfn) | |
1791 | { | |
1792 | unsigned long start_pfn, end_pfn; | |
1793 | int i, nid, zone; | |
1794 | bool descending; | |
1795 | ||
1796 | /* Record where the zone boundaries are */ | |
1797 | memset(arch_zone_lowest_possible_pfn, 0, | |
1798 | sizeof(arch_zone_lowest_possible_pfn)); | |
1799 | memset(arch_zone_highest_possible_pfn, 0, | |
1800 | sizeof(arch_zone_highest_possible_pfn)); | |
1801 | ||
1802 | start_pfn = PHYS_PFN(memblock_start_of_DRAM()); | |
1803 | descending = arch_has_descending_max_zone_pfns(); | |
1804 | ||
1805 | for (i = 0; i < MAX_NR_ZONES; i++) { | |
1806 | if (descending) | |
1807 | zone = MAX_NR_ZONES - i - 1; | |
1808 | else | |
1809 | zone = i; | |
1810 | ||
1811 | if (zone == ZONE_MOVABLE) | |
1812 | continue; | |
1813 | ||
1814 | end_pfn = max(max_zone_pfn[zone], start_pfn); | |
1815 | arch_zone_lowest_possible_pfn[zone] = start_pfn; | |
1816 | arch_zone_highest_possible_pfn[zone] = end_pfn; | |
1817 | ||
1818 | start_pfn = end_pfn; | |
1819 | } | |
1820 | ||
1821 | /* Find the PFNs that ZONE_MOVABLE begins at in each node */ | |
1822 | memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); | |
1823 | find_zone_movable_pfns_for_nodes(); | |
1824 | ||
1825 | /* Print out the zone ranges */ | |
1826 | pr_info("Zone ranges:\n"); | |
1827 | for (i = 0; i < MAX_NR_ZONES; i++) { | |
1828 | if (i == ZONE_MOVABLE) | |
1829 | continue; | |
1830 | pr_info(" %-8s ", zone_names[i]); | |
1831 | if (arch_zone_lowest_possible_pfn[i] == | |
1832 | arch_zone_highest_possible_pfn[i]) | |
1833 | pr_cont("empty\n"); | |
1834 | else | |
1835 | pr_cont("[mem %#018Lx-%#018Lx]\n", | |
1836 | (u64)arch_zone_lowest_possible_pfn[i] | |
1837 | << PAGE_SHIFT, | |
1838 | ((u64)arch_zone_highest_possible_pfn[i] | |
1839 | << PAGE_SHIFT) - 1); | |
1840 | } | |
1841 | ||
1842 | /* Print out the PFNs ZONE_MOVABLE begins at in each node */ | |
1843 | pr_info("Movable zone start for each node\n"); | |
1844 | for (i = 0; i < MAX_NUMNODES; i++) { | |
1845 | if (zone_movable_pfn[i]) | |
1846 | pr_info(" Node %d: %#018Lx\n", i, | |
1847 | (u64)zone_movable_pfn[i] << PAGE_SHIFT); | |
1848 | } | |
1849 | ||
1850 | /* | |
1851 | * Print out the early node map, and initialize the | |
1852 | * subsection-map relative to active online memory ranges to | |
1853 | * enable future "sub-section" extensions of the memory map. | |
1854 | */ | |
1855 | pr_info("Early memory node ranges\n"); | |
1856 | for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { | |
1857 | pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, | |
1858 | (u64)start_pfn << PAGE_SHIFT, | |
1859 | ((u64)end_pfn << PAGE_SHIFT) - 1); | |
1860 | subsection_map_init(start_pfn, end_pfn - start_pfn); | |
1861 | } | |
1862 | ||
1863 | /* Initialise every node */ | |
1864 | mminit_verify_pageflags_layout(); | |
1865 | setup_nr_node_ids(); | |
1866 | for_each_node(nid) { | |
1867 | pg_data_t *pgdat; | |
1868 | ||
1869 | if (!node_online(nid)) { | |
1870 | pr_info("Initializing node %d as memoryless\n", nid); | |
1871 | ||
1872 | /* Allocator not initialized yet */ | |
1873 | pgdat = arch_alloc_nodedata(nid); | |
1874 | if (!pgdat) | |
1875 | panic("Cannot allocate %zuB for node %d.\n", | |
1876 | sizeof(*pgdat), nid); | |
1877 | arch_refresh_nodedata(nid, pgdat); | |
1878 | free_area_init_memoryless_node(nid); | |
1879 | ||
1880 | /* | |
1881 | * We do not want to confuse userspace with sysfs | 
1882 | * files/directories for a node without any memory | 
1883 | * attached to it, so this node is not marked as | |
1884 | * N_MEMORY and not marked online so that no sysfs | |
1885 | * hierarchy will be created via register_one_node for | |
1886 | * it. The pgdat will get fully initialized by | |
1887 | * hotadd_init_pgdat() when memory is hotplugged into | |
1888 | * this node. | |
1889 | */ | |
1890 | continue; | |
1891 | } | |
1892 | ||
1893 | pgdat = NODE_DATA(nid); | |
1894 | free_area_init_node(nid); | |
1895 | ||
1896 | /* Any memory on that node */ | |
1897 | if (pgdat->node_present_pages) | |
1898 | node_set_state(nid, N_MEMORY); | |
1899 | check_for_memory(pgdat, nid); | |
1900 | } | |
1901 | ||
1902 | memmap_init(); | |
534ef4e1 MRI |
1903 | |
1904 | /* disable hash distribution for systems with a single node */ | |
1905 | fixup_hashdist(); | |
9420f89d MRI |
1906 | } |
1907 | ||
1908 | /** | |
1909 | * node_map_pfn_alignment - determine the maximum internode alignment | |
1910 | * | |
1911 | * This function should be called after node map is populated and sorted. | |
1912 | * It calculates the maximum power of two alignment which can distinguish | |
1913 | * all the nodes. | |
1914 | * | |
1915 | * For example, if all nodes are 1GiB and aligned to 1GiB, the return value | |
1916 | * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the | |
1917 | * nodes are shifted by 256MiB, it returns 256MiB. Note that if only the last node is | 
1918 | * shifted, 1GiB is enough and this function will indicate so. | |
1919 | * | |
1920 | * This is used to test whether pfn -> nid mapping of the chosen memory | |
1921 | * model has fine enough granularity to avoid incorrect mapping for the | |
1922 | * populated node map. | |
1923 | * | |
1924 | * Return: the determined alignment in pfn's. 0 if there is no alignment | |
1925 | * requirement (single node). | |
1926 | */ | |
1927 | unsigned long __init node_map_pfn_alignment(void) | |
1928 | { | |
1929 | unsigned long accl_mask = 0, last_end = 0; | |
1930 | unsigned long start, end, mask; | |
1931 | int last_nid = NUMA_NO_NODE; | |
1932 | int i, nid; | |
1933 | ||
1934 | for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { | |
1935 | if (!start || last_nid < 0 || last_nid == nid) { | |
1936 | last_nid = nid; | |
1937 | last_end = end; | |
1938 | continue; | |
1939 | } | |
1940 | ||
1941 | /* | |
1942 | * Start with a mask granular enough to pin-point to the | |
1943 | * start pfn and tick off bits one-by-one until it becomes | |
1944 | * too coarse to separate the current node from the last. | |
1945 | */ | |
1946 | mask = ~((1UL << __ffs(start)) - 1); | 
1947 | while (mask && last_end <= (start & (mask << 1))) | |
1948 | mask <<= 1; | |
1949 | ||
1950 | /* accumulate all internode masks */ | |
1951 | accl_mask |= mask; | |
1952 | } | |
1953 | ||
1954 | /* convert mask to number of pages */ | |
1955 | return ~accl_mask + 1; | |
1956 | } | |
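/*
 * Worked example (illustrative, 4KiB pages): node 0 covers [0, 0x40000)
 * and node 1 starts at pfn 0x50000. The mask starts at ~0xffff
 * (__ffs(0x50000) == 16) and is widened twice before
 * 0x50000 & (mask << 1) would fall below last_end, so the function
 * returns 0x40000 pfns, i.e. 1GiB, matching the "only the last node is
 * shifted" case described above.
 */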
1957 | ||
1958 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT | |
1959 | static void __init deferred_free_range(unsigned long pfn, | |
1960 | unsigned long nr_pages) | |
1961 | { | |
1962 | struct page *page; | |
1963 | unsigned long i; | |
1964 | ||
1965 | if (!nr_pages) | |
1966 | return; | |
1967 | ||
1968 | page = pfn_to_page(pfn); | |
1969 | ||
1970 | /* Free a large naturally-aligned chunk if possible */ | |
3f6dac0f KS |
1971 | if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) { |
1972 | for (i = 0; i < nr_pages; i += pageblock_nr_pages) | |
1973 | set_pageblock_migratetype(page + i, MIGRATE_MOVABLE); | |
1974 | __free_pages_core(page, MAX_ORDER); | |
9420f89d MRI |
1975 | return; |
1976 | } | |
1977 | ||
1978 | for (i = 0; i < nr_pages; i++, page++, pfn++) { | |
1979 | if (pageblock_aligned(pfn)) | |
1980 | set_pageblock_migratetype(page, MIGRATE_MOVABLE); | |
1981 | __free_pages_core(page, 0); | |
1982 | } | |
1983 | } | |
1984 | ||
1985 | /* Completion tracking for deferred_init_memmap() threads */ | |
1986 | static atomic_t pgdat_init_n_undone __initdata; | |
1987 | static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp); | |
1988 | ||
1989 | static inline void __init pgdat_init_report_one_done(void) | |
1990 | { | |
1991 | if (atomic_dec_and_test(&pgdat_init_n_undone)) | |
1992 | complete(&pgdat_init_all_done_comp); | |
1993 | } | |
1994 | ||
1995 | /* | |
1996 | * Returns true if the page needs to be initialized or freed to the buddy allocator. | 
1997 | * | |
3f6dac0f | 1998 | * We check whether a MAX_ORDER block is valid by checking only the validity | 
9420f89d MRI | 
1999 | * of its head pfn. | 
2000 | */ | |
2001 | static inline bool __init deferred_pfn_valid(unsigned long pfn) | |
2002 | { | |
3f6dac0f | 2003 | if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn)) |
9420f89d MRI |
2004 | return false; |
2005 | return true; | |
2006 | } | |
2007 | ||
2008 | /* | |
2009 | * Free pages to the buddy allocator. Try to free aligned pages in | 
3f6dac0f | 2010 | * MAX_ORDER_NR_PAGES-sized chunks. | 
9420f89d MRI |
2011 | */ |
2012 | static void __init deferred_free_pages(unsigned long pfn, | |
2013 | unsigned long end_pfn) | |
2014 | { | |
2015 | unsigned long nr_free = 0; | |
2016 | ||
2017 | for (; pfn < end_pfn; pfn++) { | |
2018 | if (!deferred_pfn_valid(pfn)) { | |
2019 | deferred_free_range(pfn - nr_free, nr_free); | |
2020 | nr_free = 0; | |
3f6dac0f | 2021 | } else if (IS_MAX_ORDER_ALIGNED(pfn)) { |
9420f89d MRI |
2022 | deferred_free_range(pfn - nr_free, nr_free); |
2023 | nr_free = 1; | |
2024 | } else { | |
2025 | nr_free++; | |
2026 | } | |
2027 | } | |
2028 | /* Free the last block of pages to allocator */ | |
2029 | deferred_free_range(pfn - nr_free, nr_free); | |
2030 | } | |
2031 | ||
2032 | /* | |
2033 | * Initialize struct pages. We minimize pfn page lookups and scheduler checks | 
3f6dac0f | 2034 | * by performing them only once every MAX_ORDER_NR_PAGES. | 
9420f89d MRI | 
2035 | * Returns the number of pages initialized. | 
2036 | */ | |
2037 | static unsigned long __init deferred_init_pages(struct zone *zone, | |
2038 | unsigned long pfn, | |
2039 | unsigned long end_pfn) | |
2040 | { | |
2041 | int nid = zone_to_nid(zone); | |
2042 | unsigned long nr_pages = 0; | |
2043 | int zid = zone_idx(zone); | |
2044 | struct page *page = NULL; | |
2045 | ||
2046 | for (; pfn < end_pfn; pfn++) { | |
2047 | if (!deferred_pfn_valid(pfn)) { | |
2048 | page = NULL; | |
2049 | continue; | |
3f6dac0f | 2050 | } else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) { |
9420f89d MRI |
2051 | page = pfn_to_page(pfn); |
2052 | } else { | |
2053 | page++; | |
2054 | } | |
2055 | __init_single_page(page, pfn, zid, nid); | |
2056 | nr_pages++; | |
2057 | } | |
2058 | return nr_pages; | 
2059 | } | |
2060 | ||
2061 | /* | |
2062 | * This function is meant to pre-load the iterator for the zone init. | |
2063 | * Specifically, it walks through the ranges until we are caught up to the | 
2064 | * first_init_pfn value and exits there. If we never encounter the value, we | 
2065 | * return false, indicating there are no valid ranges left. | 
2066 | */ | |
2067 | static bool __init | |
2068 | deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, | |
2069 | unsigned long *spfn, unsigned long *epfn, | |
2070 | unsigned long first_init_pfn) | |
2071 | { | |
2072 | u64 j; | |
2073 | ||
2074 | /* | |
2075 | * Start out by walking through the ranges in this zone that have | |
2076 | * already been initialized. We don't need to do anything with them | |
2077 | * so we just need to flush them out of the system. | |
2078 | */ | |
2079 | for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { | |
2080 | if (*epfn <= first_init_pfn) | |
2081 | continue; | |
2082 | if (*spfn < first_init_pfn) | |
2083 | *spfn = first_init_pfn; | |
2084 | *i = j; | |
2085 | return true; | |
2086 | } | |
2087 | ||
2088 | return false; | |
2089 | } | |
2090 | ||
2091 | /* | |
2092 | * Initialize and free pages. We do it in two loops: first we initialize | |
2093 | * struct page, then free to buddy allocator, because while we are | |
2094 | * freeing pages we can access pages that are ahead (computing buddy | |
2095 | * page in __free_one_page()). | |
2096 | * | |
2097 | * To try to keep some memory in the cache, the loop is broken along | 
2098 | * max page order boundaries. This way we will not cause | 
2099 | * any issues with the buddy page computation. | |
2100 | */ | |
2101 | static unsigned long __init | |
2102 | deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, | |
2103 | unsigned long *end_pfn) | |
2104 | { | |
2105 | unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES); | |
2106 | unsigned long spfn = *start_pfn, epfn = *end_pfn; | |
2107 | unsigned long nr_pages = 0; | |
2108 | u64 j = *i; | |
2109 | ||
2110 | /* First we loop through and initialize the page values */ | |
2111 | for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { | |
2112 | unsigned long t; | |
2113 | ||
2114 | if (mo_pfn <= *start_pfn) | |
2115 | break; | |
2116 | ||
2117 | t = min(mo_pfn, *end_pfn); | |
2118 | nr_pages += deferred_init_pages(zone, *start_pfn, t); | |
2119 | ||
2120 | if (mo_pfn < *end_pfn) { | |
2121 | *start_pfn = mo_pfn; | |
2122 | break; | |
2123 | } | |
2124 | } | |
2125 | ||
2126 | /* Reset values and now loop through freeing pages as needed */ | |
2127 | swap(j, *i); | |
2128 | ||
2129 | for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { | |
2130 | unsigned long t; | |
2131 | ||
2132 | if (mo_pfn <= spfn) | |
2133 | break; | |
2134 | ||
2135 | t = min(mo_pfn, epfn); | |
2136 | deferred_free_pages(spfn, t); | |
2137 | ||
2138 | if (mo_pfn <= epfn) | |
2139 | break; | |
2140 | } | |
2141 | ||
2142 | return nr_pages; | |
2143 | } | |
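/*
 * Sketch of one call (illustrative values): with MAX_ORDER_NR_PAGES ==
 * 1024 and *start_pfn == 0x2100, mo_pfn is ALIGN(0x2101, 1024) ==
 * 0x2400. Struct pages for [0x2100, 0x2400) are initialised by the
 * first loop, then the second loop frees the same span, so
 * __free_one_page() never touches a buddy struct page that has not
 * been initialised yet.
 */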
2144 | ||
2145 | static void __init | |
2146 | deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn, | |
2147 | void *arg) | |
2148 | { | |
2149 | unsigned long spfn, epfn; | |
2150 | struct zone *zone = arg; | |
2151 | u64 i; | |
2152 | ||
2153 | deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); | |
2154 | ||
2155 | /* | |
2156 | * Initialize and free pages in MAX_ORDER-sized increments so that we | 
2157 | * can avoid introducing any issues with the buddy allocator. | |
2158 | */ | |
2159 | while (spfn < end_pfn) { | |
2160 | deferred_init_maxorder(&i, zone, &spfn, &epfn); | |
2161 | cond_resched(); | |
2162 | } | |
2163 | } | |
2164 | ||
2165 | /* An arch may override for more concurrency. */ | |
2166 | __weak int __init | |
2167 | deferred_page_init_max_threads(const struct cpumask *node_cpumask) | |
2168 | { | |
2169 | return 1; | |
2170 | } | |
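/*
 * A minimal arch override might use every CPU of the node (sketch, not
 * from this file; x86 does something along these lines):
 *
 *	int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask)
 *	{
 *		return max_t(int, cpumask_weight(node_cpumask), 1);
 *	}
 */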
2171 | ||
2172 | /* Initialise remaining memory on a node */ | |
2173 | static int __init deferred_init_memmap(void *data) | |
2174 | { | |
2175 | pg_data_t *pgdat = data; | |
2176 | const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); | |
2177 | unsigned long spfn = 0, epfn = 0; | |
2178 | unsigned long first_init_pfn, flags; | |
2179 | unsigned long start = jiffies; | |
2180 | struct zone *zone; | |
2181 | int zid, max_threads; | |
2182 | u64 i; | |
2183 | ||
2184 | /* Bind memory initialisation thread to a local node if possible */ | |
2185 | if (!cpumask_empty(cpumask)) | |
2186 | set_cpus_allowed_ptr(current, cpumask); | |
2187 | ||
2188 | pgdat_resize_lock(pgdat, &flags); | |
2189 | first_init_pfn = pgdat->first_deferred_pfn; | |
2190 | if (first_init_pfn == ULONG_MAX) { | |
2191 | pgdat_resize_unlock(pgdat, &flags); | |
2192 | pgdat_init_report_one_done(); | |
2193 | return 0; | |
2194 | } | |
2195 | ||
2196 | /* Sanity check boundaries */ | |
2197 | BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); | |
2198 | BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); | |
2199 | pgdat->first_deferred_pfn = ULONG_MAX; | |
2200 | ||
2201 | /* | |
2202 | * Once we unlock here, the zone cannot be grown anymore, thus if an | |
2203 | * interrupt thread must allocate this early in boot, the zone must be | 
2204 | * pre-grown before deferred page initialization starts. | 
2205 | */ | |
2206 | pgdat_resize_unlock(pgdat, &flags); | |
2207 | ||
2208 | /* Only the highest zone is deferred so find it */ | |
2209 | for (zid = 0; zid < MAX_NR_ZONES; zid++) { | |
2210 | zone = pgdat->node_zones + zid; | |
2211 | if (first_init_pfn < zone_end_pfn(zone)) | |
2212 | break; | |
2213 | } | |
2214 | ||
2215 | /* If the zone is empty, somebody else may have already cleared it out */ | 
2216 | if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, | |
2217 | first_init_pfn)) | |
2218 | goto zone_empty; | |
2219 | ||
2220 | max_threads = deferred_page_init_max_threads(cpumask); | |
2221 | ||
2222 | while (spfn < epfn) { | |
2223 | unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION); | |
2224 | struct padata_mt_job job = { | |
2225 | .thread_fn = deferred_init_memmap_chunk, | |
2226 | .fn_arg = zone, | |
2227 | .start = spfn, | |
2228 | .size = epfn_align - spfn, | |
2229 | .align = PAGES_PER_SECTION, | |
2230 | .min_chunk = PAGES_PER_SECTION, | |
2231 | .max_threads = max_threads, | |
2232 | }; | |
2233 | ||
2234 | padata_do_multithreaded(&job); | |
2235 | deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, | |
2236 | epfn_align); | |
2237 | } | |
2238 | zone_empty: | |
2239 | /* Sanity check that the next zone really is unpopulated */ | |
2240 | WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); | |
2241 | ||
2242 | pr_info("node %d deferred pages initialised in %ums\n", | |
2243 | pgdat->node_id, jiffies_to_msecs(jiffies - start)); | |
2244 | ||
2245 | pgdat_init_report_one_done(); | |
2246 | return 0; | |
2247 | } | |
2248 | ||
2249 | /* | |
2250 | * If this zone has deferred pages, try to grow it by initializing enough | |
2251 | * deferred pages to satisfy the allocation specified by order, rounded up to | |
2252 | * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments | |
2253 | * of SECTION_SIZE bytes by initializing struct pages in increments of | |
2254 | * PAGES_PER_SECTION * sizeof(struct page) bytes. | |
2255 | * | |
2256 | * Return true when zone was grown, otherwise return false. We return true even | |
2257 | * when we grow less than requested, to let the caller decide if there are | |
2258 | * enough pages to satisfy the allocation. | |
2259 | * | |
2260 | * Note: We use noinline because this function is needed only during boot, and | |
2261 | * it is called from a __ref function _deferred_grow_zone. This way we are | |
2262 | * making sure that it is not inlined into permanent text section. | |
2263 | */ | |
2264 | bool __init deferred_grow_zone(struct zone *zone, unsigned int order) | |
2265 | { | |
2266 | unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION); | |
2267 | pg_data_t *pgdat = zone->zone_pgdat; | |
2268 | unsigned long first_deferred_pfn = pgdat->first_deferred_pfn; | |
2269 | unsigned long spfn, epfn, flags; | |
2270 | unsigned long nr_pages = 0; | |
2271 | u64 i; | |
2272 | ||
2273 | /* Only the last zone may have deferred pages */ | |
2274 | if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) | |
2275 | return false; | |
2276 | ||
2277 | pgdat_resize_lock(pgdat, &flags); | |
2278 | ||
2279 | /* | |
2280 | * If someone grew this zone while we were waiting for the spinlock, return | 
2281 | * true, as there might be enough pages already. | |
2282 | */ | |
2283 | if (first_deferred_pfn != pgdat->first_deferred_pfn) { | |
2284 | pgdat_resize_unlock(pgdat, &flags); | |
2285 | return true; | |
2286 | } | |
2287 | ||
2288 | /* If the zone is empty, somebody else may have already cleared it out */ | 
2289 | if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, | |
2290 | first_deferred_pfn)) { | |
2291 | pgdat->first_deferred_pfn = ULONG_MAX; | |
2292 | pgdat_resize_unlock(pgdat, &flags); | |
2293 | /* Retry only once. */ | |
2294 | return first_deferred_pfn != ULONG_MAX; | |
2295 | } | |
2296 | ||
2297 | /* | |
2298 | * Initialize and free pages in MAX_ORDER-sized increments so | 
2299 | * that we can avoid introducing any issues with the buddy | |
2300 | * allocator. | |
2301 | */ | |
2302 | while (spfn < epfn) { | |
2303 | /* update our first deferred PFN for this section */ | |
2304 | first_deferred_pfn = spfn; | |
2305 | ||
2306 | nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); | |
2307 | touch_nmi_watchdog(); | |
2308 | ||
2309 | /* We should only stop along section boundaries */ | |
2310 | if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION) | |
2311 | continue; | |
2312 | ||
2313 | /* If our quota has been met we can stop here */ | |
2314 | if (nr_pages >= nr_pages_needed) | |
2315 | break; | |
2316 | } | |
2317 | ||
2318 | pgdat->first_deferred_pfn = spfn; | |
2319 | pgdat_resize_unlock(pgdat, &flags); | |
2320 | ||
2321 | return nr_pages > 0; | |
2322 | } | |
2323 | ||
2324 | #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ | |
2325 | ||
2326 | #ifdef CONFIG_CMA | |
2327 | void __init init_cma_reserved_pageblock(struct page *page) | |
2328 | { | |
2329 | unsigned i = pageblock_nr_pages; | |
2330 | struct page *p = page; | |
2331 | ||
2332 | do { | |
2333 | __ClearPageReserved(p); | |
2334 | set_page_count(p, 0); | |
2335 | } while (++p, --i); | |
2336 | ||
2337 | set_pageblock_migratetype(page, MIGRATE_CMA); | |
2338 | set_page_refcounted(page); | |
2339 | __free_pages(page, pageblock_order); | |
2340 | ||
2341 | adjust_managed_page_count(page, pageblock_nr_pages); | |
2342 | page_zone(page)->cma_pages += pageblock_nr_pages; | |
2343 | } | |
2344 | #endif | |
2345 | ||
904d5857 KW |
2346 | void set_zone_contiguous(struct zone *zone) |
2347 | { | |
2348 | unsigned long block_start_pfn = zone->zone_start_pfn; | |
2349 | unsigned long block_end_pfn; | |
2350 | ||
2351 | block_end_pfn = pageblock_end_pfn(block_start_pfn); | |
2352 | for (; block_start_pfn < zone_end_pfn(zone); | |
2353 | block_start_pfn = block_end_pfn, | |
2354 | block_end_pfn += pageblock_nr_pages) { | |
2355 | ||
2356 | block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); | |
2357 | ||
2358 | if (!__pageblock_pfn_to_page(block_start_pfn, | |
2359 | block_end_pfn, zone)) | |
2360 | return; | |
2361 | cond_resched(); | |
2362 | } | |
2363 | ||
2364 | /* We confirm that there is no hole */ | |
2365 | zone->contiguous = true; | |
2366 | } | |
2367 | ||
9420f89d MRI |
2368 | void __init page_alloc_init_late(void) |
2369 | { | |
2370 | struct zone *zone; | |
2371 | int nid; | |
2372 | ||
2373 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT | |
2374 | ||
2375 | /* There will be num_node_state(N_MEMORY) threads */ | |
2376 | atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); | |
2377 | for_each_node_state(nid, N_MEMORY) { | |
2378 | kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); | |
2379 | } | |
2380 | ||
2381 | /* Block until all are initialised */ | |
2382 | wait_for_completion(&pgdat_init_all_done_comp); | |
2383 | ||
2384 | /* | |
2385 | * We initialized the rest of the deferred pages. Permanently disable | |
2386 | * on-demand struct page initialization. | |
2387 | */ | |
2388 | static_branch_disable(&deferred_pages); | |
2389 | ||
2390 | /* Reinit limits that are based on free pages after the kernel is up */ | |
2391 | files_maxfiles_init(); | |
2392 | #endif | |
2393 | ||
2394 | buffer_init(); | |
2395 | ||
2396 | /* Discard memblock private memory */ | |
2397 | memblock_discard(); | |
2398 | ||
2399 | for_each_node_state(nid, N_MEMORY) | |
2400 | shuffle_free_memory(NODE_DATA(nid)); | |
2401 | ||
2402 | for_each_populated_zone(zone) | |
2403 | set_zone_contiguous(zone); | |
de57807e MRI |
2404 | |
2405 | /* Initialize page ext after all struct pages are initialized. */ | |
2406 | if (deferred_struct_pages) | |
2407 | page_ext_init(); | |
e95d372c KW |
2408 | |
2409 | page_alloc_sysctl_init(); | |
9420f89d MRI |
2410 | } |
2411 | ||
2412 | #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES | |
2413 | /* | |
2414 | * Returns the number of pages that arch has reserved but | |
2415 | * is not known to alloc_large_system_hash(). | |
2416 | */ | |
2417 | static unsigned long __init arch_reserved_kernel_pages(void) | |
2418 | { | |
2419 | return 0; | |
2420 | } | |
2421 | #endif | |
2422 | ||
2423 | /* | |
2424 | * Adaptive scale is meant to reduce sizes of hash tables on large memory | |
2425 | * machines. As memory size is increased the scale is also increased but at | |
2426 | * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory | |
2427 | * quadruples the scale is increased by one, which means the size of hash table | |
2428 | * only doubles, instead of quadrupling as well. | |
2429 | * Because 32-bit systems cannot have large physical memory, where this scaling | |
2430 | * makes sense, it is disabled on such platforms. | |
2431 | */ | |
2432 | #if __BITS_PER_LONG > 32 | |
2433 | #define ADAPT_SCALE_BASE (64ul << 30) | |
2434 | #define ADAPT_SCALE_SHIFT 2 | |
2435 | #define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT) | |
2436 | #endif | |
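/*
 * Illustrative arithmetic (4KiB pages): ADAPT_SCALE_NPAGES is 16M pages
 * (64GiB). On a 512GiB machine numentries starts around 128M pages, so
 * the adapt loop in alloc_large_system_hash() bumps scale twice
 * (16M -> 64M -> 256M) and the table ends up 4x smaller than linear
 * scaling from 64GiB would produce.
 */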
2437 | ||
2438 | /* | |
2439 | * allocate a large system hash table from bootmem | |
2440 | * - it is assumed that the hash table must contain an exact power-of-2 | |
2441 | * quantity of entries | |
2442 | * - limit is the number of hash buckets, not the total allocation size | |
2443 | */ | |
2444 | void *__init alloc_large_system_hash(const char *tablename, | |
2445 | unsigned long bucketsize, | |
2446 | unsigned long numentries, | |
2447 | int scale, | |
2448 | int flags, | |
2449 | unsigned int *_hash_shift, | |
2450 | unsigned int *_hash_mask, | |
2451 | unsigned long low_limit, | |
2452 | unsigned long high_limit) | |
2453 | { | |
2454 | unsigned long long max = high_limit; | |
2455 | unsigned long log2qty, size; | |
2456 | void *table; | |
2457 | gfp_t gfp_flags; | |
2458 | bool virt; | |
2459 | bool huge; | |
2460 | ||
2461 | /* allow the kernel cmdline to have a say */ | |
2462 | if (!numentries) { | |
2463 | /* round applicable memory size up to nearest megabyte */ | |
2464 | numentries = nr_kernel_pages; | |
2465 | numentries -= arch_reserved_kernel_pages(); | |
2466 | ||
2467 | /* It isn't necessary when PAGE_SIZE >= 1MB */ | |
2468 | if (PAGE_SIZE < SZ_1M) | |
2469 | numentries = round_up(numentries, SZ_1M / PAGE_SIZE); | |
2470 | ||
2471 | #if __BITS_PER_LONG > 32 | |
2472 | if (!high_limit) { | |
2473 | unsigned long adapt; | |
2474 | ||
2475 | for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries; | |
2476 | adapt <<= ADAPT_SCALE_SHIFT) | |
2477 | scale++; | |
2478 | } | |
2479 | #endif | |
2480 | ||
2481 | /* limit to 1 bucket per 2^scale bytes of low memory */ | |
2482 | if (scale > PAGE_SHIFT) | |
2483 | numentries >>= (scale - PAGE_SHIFT); | |
2484 | else | |
2485 | numentries <<= (PAGE_SHIFT - scale); | |
2486 | ||
2487 | /* Make sure we've got at least a 0-order allocation. */ | 
2488 | if (unlikely(flags & HASH_SMALL)) { | |
2489 | /* Makes no sense without HASH_EARLY */ | |
2490 | WARN_ON(!(flags & HASH_EARLY)); | |
2491 | if (!(numentries >> *_hash_shift)) { | |
2492 | numentries = 1UL << *_hash_shift; | |
2493 | BUG_ON(!numentries); | |
2494 | } | |
2495 | } else if (unlikely((numentries * bucketsize) < PAGE_SIZE)) | |
2496 | numentries = PAGE_SIZE / bucketsize; | |
2497 | } | |
2498 | numentries = roundup_pow_of_two(numentries); | |
2499 | ||
2500 | /* limit allocation size to 1/16 total memory by default */ | |
2501 | if (max == 0) { | |
2502 | max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; | |
2503 | do_div(max, bucketsize); | |
2504 | } | |
2505 | max = min(max, 0x80000000ULL); | |
2506 | ||
2507 | if (numentries < low_limit) | |
2508 | numentries = low_limit; | |
2509 | if (numentries > max) | |
2510 | numentries = max; | |
2511 | ||
2512 | log2qty = ilog2(numentries); | |
2513 | ||
2514 | gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC; | |
2515 | do { | |
2516 | virt = false; | |
2517 | size = bucketsize << log2qty; | |
2518 | if (flags & HASH_EARLY) { | |
2519 | if (flags & HASH_ZERO) | |
2520 | table = memblock_alloc(size, SMP_CACHE_BYTES); | |
2521 | else | |
2522 | table = memblock_alloc_raw(size, | |
2523 | SMP_CACHE_BYTES); | |
2524 | } else if (get_order(size) > MAX_ORDER || hashdist) { | |
2525 | table = vmalloc_huge(size, gfp_flags); | |
2526 | virt = true; | |
2527 | if (table) | |
2528 | huge = is_vm_area_hugepages(table); | |
2529 | } else { | |
2530 | /* | |
2531 | * If bucketsize is not a power of two, we may free | 
2532 | * some pages at the end of the hash table, which | 
2533 | * alloc_pages_exact() does automatically. | 
2534 | */ | |
2535 | table = alloc_pages_exact(size, gfp_flags); | |
2536 | kmemleak_alloc(table, size, 1, gfp_flags); | |
2537 | } | |
2538 | } while (!table && size > PAGE_SIZE && --log2qty); | |
2539 | ||
2540 | if (!table) | |
2541 | panic("Failed to allocate %s hash table\n", tablename); | |
2542 | ||
2543 | pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n", | |
2544 | tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, | |
2545 | virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear"); | |
2546 | ||
2547 | if (_hash_shift) | |
2548 | *_hash_shift = log2qty; | |
2549 | if (_hash_mask) | |
2550 | *_hash_mask = (1 << log2qty) - 1; | |
2551 | ||
2552 | return table; | |
2553 | } | |
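/*
 * Typical usage (sketch modelled on fs/inode.c; treat the exact
 * parameters as illustrative):
 *
 *	inode_hashtable = alloc_large_system_hash("Inode-cache",
 *					sizeof(struct hlist_head),
 *					ihash_entries, 14, HASH_ZERO,
 *					&i_hash_shift, &i_hash_mask,
 *					0, 0);
 *
 * scale == 14 requests roughly one bucket per 16KiB of low memory
 * unless ihash_entries was given on the kernel command line.
 */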
2554 | ||
2555 | /** | |
2556 | * set_dma_reserve - set the specified number of pages reserved in the first zone | |
2557 | * @new_dma_reserve: The number of pages to mark reserved | |
2558 | * | |
2559 | * The per-cpu batchsize and zone watermarks are determined by managed_pages. | |
2560 | * In the DMA zone, a significant percentage may be consumed by kernel image | |
2561 | * and other unfreeable allocations which can skew the watermarks badly. This | |
2562 | * function may optionally be used to account for unfreeable pages in the | |
2563 | * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and | |
2564 | * smaller per-cpu batchsize. | |
2565 | */ | |
2566 | void __init set_dma_reserve(unsigned long new_dma_reserve) | |
2567 | { | |
2568 | dma_reserve = new_dma_reserve; | |
2569 | } | |
2570 | ||
2571 | void __init memblock_free_pages(struct page *page, unsigned long pfn, | |
2572 | unsigned int order) | |
2573 | { | |
2574 | if (!early_page_initialised(pfn)) | |
2575 | return; | |
2576 | if (!kmsan_memblock_free_pages(page, order)) { | |
2577 | /* KMSAN will take care of these pages. */ | |
2578 | return; | |
2579 | } | |
2580 | __free_pages_core(page, order); | |
2581 | } | |
b7ec1bf3 | 2582 | |
5e7d5da2 KW |
2583 | DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); |
2584 | EXPORT_SYMBOL(init_on_alloc); | |
2585 | ||
2586 | DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); | |
2587 | EXPORT_SYMBOL(init_on_free); | |
2588 | ||
f2fc4b44 MRI |
2589 | static bool _init_on_alloc_enabled_early __read_mostly |
2590 | = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON); | |
2591 | static int __init early_init_on_alloc(char *buf) | |
2592 | { | |
2594 | return kstrtobool(buf, &_init_on_alloc_enabled_early); | |
2595 | } | |
2596 | early_param("init_on_alloc", early_init_on_alloc); | |
2597 | ||
2598 | static bool _init_on_free_enabled_early __read_mostly | |
2599 | = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON); | |
2600 | static int __init early_init_on_free(char *buf) | |
2601 | { | |
2602 | return kstrtobool(buf, &_init_on_free_enabled_early); | |
2603 | } | |
2604 | early_param("init_on_free", early_init_on_free); | |
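/*
 * Both parameters take the usual boolean spellings, so booting with
 * e.g. "init_on_alloc=1 init_on_free=0" zeroes memory at allocation
 * time only; kstrtobool() also accepts on/off and y/n.
 */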
2605 | ||
2606 | DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled); | |
2607 | ||
2608 | /* | |
2609 | * Enable static keys related to various memory debugging and hardening options. | |
2610 | * Some override others, and depend on early params that are evaluated in the | |
2611 | * order of appearance. So we need to first gather the full picture of what was | |
2612 | * enabled, and then make decisions. | |
2613 | */ | |
2614 | static void __init mem_debugging_and_hardening_init(void) | |
2615 | { | |
2616 | bool page_poisoning_requested = false; | |
2617 | bool want_check_pages = false; | |
2618 | ||
2619 | #ifdef CONFIG_PAGE_POISONING | |
2620 | /* | |
2621 | * Page poisoning serves as debug page alloc on some arches. If | 
2622 | * either of those options is enabled, enable poisoning. | 
2623 | */ | |
2624 | if (page_poisoning_enabled() || | |
2625 | (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && | |
2626 | debug_pagealloc_enabled())) { | |
2627 | static_branch_enable(&_page_poisoning_enabled); | |
2628 | page_poisoning_requested = true; | |
2629 | want_check_pages = true; | |
2630 | } | |
2631 | #endif | |
2632 | ||
2633 | if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) && | |
2634 | page_poisoning_requested) { | |
2635 | pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " | |
2636 | "will take precedence over init_on_alloc and init_on_free\n"); | |
2637 | _init_on_alloc_enabled_early = false; | |
2638 | _init_on_free_enabled_early = false; | |
2639 | } | |
2640 | ||
2641 | if (_init_on_alloc_enabled_early) { | |
2642 | want_check_pages = true; | |
2643 | static_branch_enable(&init_on_alloc); | |
2644 | } else { | |
2645 | static_branch_disable(&init_on_alloc); | |
2646 | } | |
2647 | ||
2648 | if (_init_on_free_enabled_early) { | |
2649 | want_check_pages = true; | |
2650 | static_branch_enable(&init_on_free); | |
2651 | } else { | |
2652 | static_branch_disable(&init_on_free); | |
2653 | } | |
2654 | ||
2655 | if (IS_ENABLED(CONFIG_KMSAN) && | |
2656 | (_init_on_alloc_enabled_early || _init_on_free_enabled_early)) | |
2657 | pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n"); | |
2658 | ||
2659 | #ifdef CONFIG_DEBUG_PAGEALLOC | |
2660 | if (debug_pagealloc_enabled()) { | |
2661 | want_check_pages = true; | |
2662 | static_branch_enable(&_debug_pagealloc_enabled); | |
2663 | ||
2664 | if (debug_guardpage_minorder()) | |
2665 | static_branch_enable(&_debug_guardpage_enabled); | |
2666 | } | |
2667 | #endif | |
2668 | ||
2669 | /* | |
2670 | * Any page debugging or hardening option also enables sanity checking | |
2671 | * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's | |
2672 | * enabled already. | |
2673 | */ | |
2674 | if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages) | |
2675 | static_branch_enable(&check_pages_enabled); | |
2676 | } | |
2677 | ||
b7ec1bf3 MRI |
2678 | /* Report memory auto-initialization states for this boot. */ |
2679 | static void __init report_meminit(void) | |
2680 | { | |
2681 | const char *stack; | |
2682 | ||
2683 | if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN)) | |
2684 | stack = "all(pattern)"; | |
2685 | else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO)) | |
2686 | stack = "all(zero)"; | |
2687 | else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL)) | |
2688 | stack = "byref_all(zero)"; | |
2689 | else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)) | |
2690 | stack = "byref(zero)"; | |
2691 | else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER)) | |
2692 | stack = "__user(zero)"; | |
2693 | else | |
2694 | stack = "off"; | |
2695 | ||
2696 | pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n", | |
2697 | stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off", | |
2698 | want_init_on_free() ? "on" : "off"); | |
2699 | if (want_init_on_free()) | |
2700 | pr_info("mem auto-init: clearing system memory may take some time...\n"); | |
2701 | } | |
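/*
 * Sample output of the pr_info above (values depend on the config and
 * command line):
 *
 *	mem auto-init: stack:all(zero), heap alloc:on, heap free:off
 */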
2702 | ||
eb8589b4 MRI |
2703 | static void __init mem_init_print_info(void) |
2704 | { | |
2705 | unsigned long physpages, codesize, datasize, rosize, bss_size; | |
2706 | unsigned long init_code_size, init_data_size; | |
2707 | ||
2708 | physpages = get_num_physpages(); | |
2709 | codesize = _etext - _stext; | |
2710 | datasize = _edata - _sdata; | |
2711 | rosize = __end_rodata - __start_rodata; | |
2712 | bss_size = __bss_stop - __bss_start; | |
2713 | init_data_size = __init_end - __init_begin; | |
2714 | init_code_size = _einittext - _sinittext; | |
2715 | ||
2716 | /* | |
2717 | * Detect special cases and adjust section sizes accordingly: | |
2718 | * 1) .init.* may be embedded into .data sections | |
2719 | * 2) .init.text.* may be out of [__init_begin, __init_end], | |
2720 | * please refer to arch/tile/kernel/vmlinux.lds.S. | |
2721 | * 3) .rodata.* may be embedded into .text or .data sections. | |
2722 | */ | |
2723 | #define adj_init_size(start, end, size, pos, adj) \ | |
2724 | do { \ | |
2725 | if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \ | |
2726 | size -= adj; \ | |
2727 | } while (0) | |
2728 | ||
2729 | adj_init_size(__init_begin, __init_end, init_data_size, | |
2730 | _sinittext, init_code_size); | |
2731 | adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); | |
2732 | adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); | |
2733 | adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); | |
2734 | adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); | |
2735 | ||
2736 | #undef adj_init_size | |
2737 | ||
2738 | pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" | |
2739 | #ifdef CONFIG_HIGHMEM | |
2740 | ", %luK highmem" | |
2741 | #endif | |
2742 | ")\n", | |
2743 | K(nr_free_pages()), K(physpages), | |
2744 | codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K, | |
2745 | (init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K, | |
2746 | K(physpages - totalram_pages() - totalcma_pages), | |
2747 | K(totalcma_pages) | |
2748 | #ifdef CONFIG_HIGHMEM | |
2749 | , K(totalhigh_pages()) | |
2750 | #endif | |
2751 | ); | |
2752 | } | |
2753 | ||
b7ec1bf3 MRI |
2754 | /* |
2755 | * Set up kernel memory allocators | |
2756 | */ | |
2757 | void __init mm_core_init(void) | |
2758 | { | |
2759 | /* Initializations relying on SMP setup */ | |
2760 | build_all_zonelists(NULL); | |
2761 | page_alloc_init_cpuhp(); | |
2762 | ||
2763 | /* | |
2764 | * page_ext requires contiguous pages, | |
2765 | * bigger than MAX_ORDER unless SPARSEMEM. | |
2766 | */ | |
2767 | page_ext_init_flatmem(); | |
f2fc4b44 | 2768 | mem_debugging_and_hardening_init(); |
b7ec1bf3 MRI |
2769 | kfence_alloc_pool(); |
2770 | report_meminit(); | |
2771 | kmsan_init_shadow(); | |
2772 | stack_depot_early_init(); | |
2773 | mem_init(); | |
2774 | mem_init_print_info(); | |
2775 | kmem_cache_init(); | |
2776 | /* | |
2777 | * page_owner must be initialized after buddy is ready, and also after | |
2778 | * slab is ready so that stack_depot_init() works properly | |
2779 | */ | |
2780 | page_ext_init_flatmem_late(); | |
2781 | kmemleak_init(); | |
4cd1e9ed MRI |
2782 | ptlock_cache_init(); |
2783 | pgtable_cache_init(); | |
b7ec1bf3 MRI |
2784 | debug_objects_mem_init(); |
2785 | vmalloc_init(); | |
2786 | /* If page_ext init was not deferred, initialize it now, as vmap is fully up */ | 
2787 | if (!deferred_struct_pages) | |
2788 | page_ext_init(); | |
2789 | /* Should be run before the first non-init thread is created */ | |
2790 | init_espfix_bsp(); | |
2791 | /* Should be run after espfix64 is set up. */ | |
2792 | pti_init(); | |
2793 | kmsan_init_runtime(); | |
2794 | mm_cache_init(); | |
2795 | } |