// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

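/*
 * Illustrative arithmetic (assuming SECTIONS_PER_ROOT == 256, i.e. 4K
 * pages and a 16-byte struct mem_section): section number 1000 lives at
 * mem_section[1000 / 256][1000 % 256] == mem_section[3][232]. The
 * SECTION_NR_TO_ROOT() and __nr_to_section() helpers perform exactly
 * this split.
 */
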
#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	/*
	 * An existing section is possible in the sub-section hotplug
	 * case. First hot-add instantiates, follow-on hot-add reuses
	 * the existing section.
	 *
	 * The mem_hotplug_lock resolves the apparent race below.
	 */
	if (mem_section[root])
		return 0;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
unsigned long __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root = NULL;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(!root);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
unsigned long __section_nr(struct mem_section *ms)
{
	return (unsigned long)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

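/*
 * Worked example (assuming SECTION_NID_SHIFT == 3): memory_present()
 * for node 2 stores 2 << 3 == 0x10 in section_mem_map; the low bits
 * remain free for SECTION_MARKED_PRESENT and friends, and
 * sparse_early_nid() recovers 0x10 >> 3 == 2 until the real mem_map
 * (and flags) overwrite the field in sparse_init_one_section().
 */
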
/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
unsigned long __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	unsigned long section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr != -1) &&				\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}

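/*
 * Usage sketch: if only sections 5, 9 and 12 were marked present,
 * first_present_section_nr() returns 5, and
 * for_each_present_section_nr(0, nr) visits nr == 5, 9, 12 before
 * terminating once next_present_section_nr() runs past
 * __highest_present_section_nr (12 here).
 */
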
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
		unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	bitmap_set(map, idx, end - idx + 1);
}

void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	unsigned long nr, start_sec = pfn_to_section_nr(pfn);

	if (!nr_pages)
		return;

	for (nr = start_sec; nr <= end_sec; nr++) {
		struct mem_section *ms;
		unsigned long pfns;

		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		ms = __nr_to_section(nr);
		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
				pfns, subsection_map_index(pfn),
				subsection_map_index(pfn + pfns - 1));

		pfn += pfns;
		nr_pages -= pfns;
	}
}
#else
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
}
#endif

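/*
 * Worked example (assuming x86-64 defaults: 4K pages, 128M sections,
 * 2M subsections, so PAGES_PER_SUBSECTION == 512 and
 * SUBSECTIONS_PER_SECTION == 64): a range of 512 pages at pfn 0x80400
 * touches only section 0x10, at an offset of 0x400 pages, so
 * subsection_mask_set() computes idx == end == 0x400 / 512 == 2 and
 * sets only bit 2 of that section's subsection_map.
 */
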
/* Record a memory area against a node. */
static void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_alloc(size, align);
		if (!mem_section)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}

/*
 * Mark all memblocks as present using memory_present().
 * This is a convenience function that is useful for marking all of the
 * system's memory as present during initialization.
 */
static void __init memblocks_present(void)
{
	unsigned long start, end;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid)
		memory_present(nid, start, end);
}

/*
 * Subtle: we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map returns the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

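/*
 * Round-trip sketch: for a section whose first pfn is
 * P == section_nr_to_pfn(pnum), the encoded value is mem_map - P.
 * Decoding adds P back, so any pfn in the section resolves as
 * ((struct page *)(section_mem_map & SECTION_MAP_MASK)) + pfn,
 * with no per-lookup subtraction of the section's start pfn.
 */
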
static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage, unsigned long flags)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
		| SECTION_HAS_MEM_MAP | flags;
	ms->usage = usage;
}

static unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

size_t mem_section_usage_size(void)
{
	return sizeof(struct mem_section_usage) + usemap_size();
}

static inline phys_addr_t pgdat_to_phys(struct pglist_data *pgdat)
{
#ifndef CONFIG_NEED_MULTIPLE_NODES
	return __pa_symbol(pgdat);
#else
	return __pa(pgdat);
#endif
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	struct mem_section_usage *usage;
	unsigned long goal, limit;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = pgdat_to_phys(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
	if (!usage && limit) {
		limit = 0;
		goto again;
	}
	return usage;
}

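/*
 * Worked example (assuming 128M sections, PA_SECTION_SHIFT == 27):
 * for a pgdat at physical address 0x123450000, goal == 0x120000000 and
 * limit == 0x128000000, so the usemap is first sought inside the
 * pgdat's own section; only if that fails is limit dropped to 0 and
 * the allocation retried anywhere on the node.
 */
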
static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(pgdat_to_phys(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency. Some platforms allow
	 * un-removable sections because they will just gather other
	 * removable sections for dynamic partitioning. Just note the
	 * un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}

#else
static unsigned long __init section_map_size(void)
{
	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long size = section_map_size();
	struct page *map = sparse_buffer_alloc(size);
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	if (map)
		return map;

	map = memblock_alloc_try_nid_raw(size, size, addr,
					  MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!map)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
		      __func__, size, PAGE_SIZE, nid, &addr);

	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static inline void __meminit sparse_buffer_free(unsigned long size)
{
	WARN_ON(!sparsemap_buf || size == 0);
	memblock_free_early(__pa(sparsemap_buf), size);
}

static void __init sparse_buffer_init(unsigned long size, int nid)
{
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
	/*
	 * Pre-allocated buffer is mainly used by __populate_section_memmap
	 * and we want it to be properly aligned to the section size - this is
	 * especially the case for VMEMMAP which maps memmap to PMDs
	 */
	sparsemap_buf = memblock_alloc_exact_nid_raw(size, section_map_size(),
						     addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	if (sparsemap_buf && size > 0)
		sparse_buffer_free(size);
	sparsemap_buf = NULL;
}

void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;
		else {
			/* Free redundant aligned space */
			if ((unsigned long)(ptr - sparsemap_buf) > 0)
				sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
			sparsemap_buf = ptr + size;
		}
	}
	return ptr;
}

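/*
 * Behavioural sketch: with a VMEMMAP section_map_size() of 2M, a
 * request from a cursor at a non-2M-aligned address is rounded up to
 * the next 2M boundary; the bytes skipped over are immediately handed
 * back to memblock via sparse_buffer_free() rather than leaked, and
 * NULL is returned (falling back to a fresh memblock allocation) if
 * the aligned request no longer fits in the buffer.
 */
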
void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
 * and the number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{
	struct mem_section_usage *usage;
	unsigned long pnum;
	struct page *map;

	usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
			mem_section_usage_size() * map_count);
	if (!usage) {
		pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
		goto failed;
	}
	sparse_buffer_init(map_count * section_map_size(), nid);
	for_each_present_section_nr(pnum_begin, pnum) {
		unsigned long pfn = section_nr_to_pfn(pnum);

		if (pnum >= pnum_end)
			break;

		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
				nid, NULL);
		if (!map) {
			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
			       __func__, nid);
			pnum_begin = pnum;
			sparse_buffer_fini();
			goto failed;
		}
		check_usemap_section_nr(nid, usage);
		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
				SECTION_IS_EARLY);
		usage = (void *) usage + mem_section_usage_size();
	}
	sparse_buffer_fini();
	return;
failed:
	/* We failed to allocate, mark all the following pnums as not present */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum_end, pnum_begin, map_count = 1;
	int nid_begin;

	memblocks_present();

	pnum_begin = first_present_section_nr();
	nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}

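/*
 * Worked example: with present sections 0-3 on node 0 and 4-7 on
 * node 1, the loop above accumulates map_count == 4 for node 0, calls
 * sparse_init_nid(0, 0, 4, 4) upon seeing the first node-1 section,
 * and the trailing call then covers node 1's sections 4-7 in one go.
 */
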
#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO this needs some double checking. Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return __populate_section_memmap(pfn, nr_pages, nid, altmap);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	vmemmap_free(start, end, altmap);
}
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section *ms = __pfn_to_section(pfn);
	unsigned long *subsection_map = ms->usage
		? &ms->usage->subsection_map[0] : NULL;

	subsection_mask_set(map, pfn, nr_pages);
	if (subsection_map)
		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);

	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
				"section already deactivated (%#lx + %ld)\n",
				pfn, nr_pages))
		return -EINVAL;

	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return bitmap_empty(&ms->usage->subsection_map[0],
			    SUBSECTIONS_PER_SECTION);
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	unsigned long *subsection_map;
	int rc = 0;

	subsection_mask_set(map, pfn, nr_pages);

	subsection_map = &ms->usage->subsection_map[0];

	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
		rc = -EINVAL;
	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		rc = -EEXIST;
	else
		bitmap_or(subsection_map, map, subsection_map,
				SUBSECTIONS_PER_SECTION);

	return rc;
}
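
/*
 * Note that fill_subsection_map() and clear_subsection_map() are
 * strict inverses over the per-section bitmap: filling the same
 * subsection (e.g. a 2M subsection on x86-64) twice fails with
 * -EEXIST because the bitmaps intersect, while clearing a range that
 * was never filled trips the WARN in clear_subsection_map() and
 * returns -EINVAL, so a double section_deactivate() bails out early.
 */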
#else
struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return kvmalloc_node(array_size(sizeof(struct page),
			PAGES_PER_SECTION), GFP_KERNEL, nid);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	kvfree(pfn_to_page(pfn));
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the section being removed is
		 * in the logically-offlined state, which means all of its
		 * pages are isolated from the page allocator. If the removed
		 * section's memmap is placed on that same section, it must
		 * not be freed: were it freed, the page allocator could hand
		 * it out again just before it is removed physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return true;
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

37bc1502 | 787 | |
95a5a34d BH |
788 | /* |
789 | * To deactivate a memory region, there are 3 cases to handle across | |
790 | * two configurations (SPARSEMEM_VMEMMAP={y,n}): | |
791 | * | |
792 | * 1. deactivation of a partial hot-added section (only possible in | |
793 | * the SPARSEMEM_VMEMMAP=y case). | |
794 | * a) section was present at memory init. | |
795 | * b) section was hot-added post memory init. | |
796 | * 2. deactivation of a complete hot-added section. | |
797 | * 3. deactivation of a complete section from memory init. | |
798 | * | |
799 | * For 1, when subsection_map does not empty we will not be freeing the | |
800 | * usage map, but still need to free the vmemmap range. | |
801 | * | |
802 | * For 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified | |
803 | */ | |
37bc1502 BH |
804 | static void section_deactivate(unsigned long pfn, unsigned long nr_pages, |
805 | struct vmem_altmap *altmap) | |
806 | { | |
807 | struct mem_section *ms = __pfn_to_section(pfn); | |
808 | bool section_is_early = early_section(ms); | |
809 | struct page *memmap = NULL; | |
810 | bool empty; | |
811 | ||
812 | if (clear_subsection_map(pfn, nr_pages)) | |
813 | return; | |
95a5a34d | 814 | |
37bc1502 | 815 | empty = is_subsection_map_empty(ms); |
d41e2f3b | 816 | if (empty) { |
ba72b4c8 DW |
817 | unsigned long section_nr = pfn_to_section_nr(pfn); |
818 | ||
8068df3b DH |
819 | /* |
820 | * When removing an early section, the usage map is kept (as the | |
821 | * usage maps of other sections fall into the same page). It | |
822 | * will be re-used when re-adding the section - which is then no | |
823 | * longer an early section. If the usage map is PageReserved, it | |
824 | * was allocated during boot. | |
825 | */ | |
826 | if (!PageReserved(virt_to_page(ms->usage))) { | |
ba72b4c8 DW |
827 | kfree(ms->usage); |
828 | ms->usage = NULL; | |
829 | } | |
830 | memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr); | |
b943f045 AK |
831 | /* |
832 | * Mark the section invalid so that valid_section() | |
833 | * return false. This prevents code from dereferencing | |
834 | * ms->usage array. | |
835 | */ | |
836 | ms->section_mem_map &= ~SECTION_HAS_MEM_MAP; | |
ba72b4c8 DW |
837 | } |
838 | ||
ef69bc9f WY |
839 | /* |
840 | * The memmap of early sections is always fully populated. See | |
841 | * section_activate() and pfn_valid() . | |
842 | */ | |
843 | if (!section_is_early) | |
ba72b4c8 | 844 | depopulate_section_memmap(pfn, nr_pages, altmap); |
ef69bc9f WY |
845 | else if (memmap) |
846 | free_map_bootmem(memmap); | |
d41e2f3b BH |
847 | |
848 | if (empty) | |
849 | ms->section_mem_map = (unsigned long)NULL; | |
ba72b4c8 DW |
850 | } |
851 | ||
static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	struct page *memmap;
	int rc = 0;

	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}

	rc = fill_subsection_map(pfn, nr_pages);
	if (rc) {
		if (usage)
			ms->usage = NULL;
		kfree(usage);
		return ERR_PTR(rc);
	}

	/*
	 * The early init code does not consider partially populated
	 * initial sections, it simply assumes that memory will never be
	 * referenced.  If we hot-add memory into such a section then we
	 * do not need to populate the memmap and can simply reuse what
	 * is already there.
	 */
	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
		return pfn_to_page(pfn);

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}

	return memmap;
}

/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: device page map
 *
 * This is only intended for hotplug.
 *
 * Note that only VMEMMAP supports sub-section aligned hotplug;
 * the proper alignment and size are gated by check_pfn_span().
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section was already present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	int ret;

	ret = sparse_index_init(section_nr, nid);
	if (ret < 0)
		return ret;

	memmap = section_activate(nid, start_pfn, nr_pages, altmap);
	if (IS_ERR(memmap))
		return PTR_ERR(memmap);

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	page_init_poison(memmap, sizeof(struct page) * nr_pages);

	ms = __nr_to_section(section_nr);
	set_section_nid(section_nr, nid);
	section_mark_present(ms);

	/* Align memmap to section boundary in the subsection case */
	if (section_nr_to_pfn(section_nr) != start_pfn)
		memmap = pfn_to_page(section_nr_to_pfn(section_nr));
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

	return 0;
}

#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	/*
	 * A further optimization is to have per section refcounted
	 * num_poisoned_pages.  But that would need more space per memmap, so
	 * for now just do a quick global check to speed up this routine in the
	 * absence of bad pages.
	 */
	if (atomic_long_read(&num_poisoned_pages) == 0)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			num_poisoned_pages_dec();
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
		unsigned long nr_pages, unsigned long map_offset,
		struct vmem_altmap *altmap)
{
	clear_hwpoisoned_pages(pfn_to_page(pfn) + map_offset,
			nr_pages - map_offset);
	section_deactivate(pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */