// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/rcupdate.h>
#include <linux/pgalloc_tag.h>

/*
 * struct page extension
 *
 * This feature manages memory for extended data per page.
 *
 * Without it, storing extra per-page data requires modifying struct page
 * itself, which means rebuilding the kernel - a time-consuming process that
 * is sometimes impossible due to third-party module dependencies. Worse,
 * enlarging struct page can cause unwanted changes in system behaviour.
 *
 * This feature is intended to overcome those problems. It allocates memory
 * for extended per-page data in a separate place rather than in struct page
 * itself, and that memory is accessed through the accessor functions
 * provided by this code. During boot, the core checks whether a huge chunk
 * of memory is actually needed; if not, it avoids allocating any memory at
 * all. Thanks to this, the feature can be built into the kernel by default
 * without forcing rebuilds or wasting memory.
 *
 * To make this work, clients provide two callbacks. The need callback is
 * mandatory if a user wants to avoid useless memory allocation at boot time.
 * The init callback is optional and is used to perform proper initialization
 * after the memory has been allocated.
 *
 * The need callback decides whether extended memory allocation is needed.
 * Sometimes users want to deactivate certain features for a given boot, in
 * which case the extra memory would be unnecessary. To avoid allocating a
 * huge chunk of memory in that situation, each client reports its need for
 * extra memory through the need callback. If any need callback returns true,
 * someone needs extra memory and the page extension core allocates it. If
 * none of them return true, no memory is needed for this boot and the core
 * skips the allocation entirely, so no memory is wasted.
 *
 * When a need callback returns true, page_ext checks whether the client
 * requests extra space via the size field in struct page_ext_operations.
 * If it is non-zero, that much extra space is reserved in each page_ext
 * entry and its location is reported back to the client through the offset
 * field in struct page_ext_operations.
 *
 * The init callback performs proper initialization once page extension is
 * completely set up. On sparse memory systems the extra memory is allocated
 * some time after the memmap, so the lifetime of page extension memory is
 * not the same as that of the memmap for struct page. Clients therefore
 * cannot store extra data until page extension is initialized, even though
 * pages may already be allocated and in use. That could leave the per-page
 * extra data in an inconsistent state, so clients can use this callback to
 * initialize it correctly.
 */
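
/*
 * Illustrative sketch, not part of this file: a hypothetical client "foo"
 * that wants a few bytes of per-page data would register itself roughly
 * like this, assuming it is compiled in and added to the page_ext_ops[]
 * array below. All "foo" names are made up for illustration.
 *
 *	static bool need_foo(void)
 *	{
 *		return foo_enabled;	// e.g. set by an early_param()
 *	}
 *
 *	static void init_foo(void)
 *	{
 *		// runs once page_ext is fully set up
 *	}
 *
 *	struct page_ext_operations foo_ops = {
 *		.size = sizeof(struct foo_data),
 *		.need = need_foo,
 *		.init = init_foo,
 *	};
 *
 * After invoke_need_callbacks() runs, foo_ops.offset holds the byte offset
 * of foo's data inside each page_ext entry, so the data can be reached with
 * (void *)page_ext + foo_ops.offset.
 */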

#ifdef CONFIG_SPARSEMEM
#define PAGE_EXT_INVALID (0x1)
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
static bool need_page_idle(void)
{
	return true;
}
static struct page_ext_operations page_idle_ops __initdata = {
	.need = need_page_idle,
	.need_shared_flags = true,
};
#endif

static struct page_ext_operations *page_ext_ops[] __initdata = {
#ifdef CONFIG_PAGE_OWNER
	&page_owner_ops,
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
	&page_idle_ops,
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
	&page_alloc_tagging_ops,
#endif
#ifdef CONFIG_PAGE_TABLE_CHECK
	&page_table_check_ops,
#endif
};

unsigned long page_ext_size;

static unsigned long total_usage;

bool early_page_ext __meminitdata;
static int __init setup_early_page_ext(char *str)
{
	early_page_ext = true;
	return 0;
}
early_param("early_page_ext", setup_early_page_ext);

static bool __init invoke_need_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);
	bool need = false;

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need()) {
			if (page_ext_ops[i]->need_shared_flags) {
				page_ext_size = sizeof(struct page_ext);
				break;
			}
		}
	}

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->need()) {
			page_ext_ops[i]->offset = page_ext_size;
			page_ext_size += page_ext_ops[i]->size;
			need = true;
		}
	}

	return need;
}
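
/*
 * Worked example of the layout produced above (numbers are assumptions for
 * this sketch only): suppose page_owner is the only enabled client, it sets
 * need_shared_flags and requests 16 bytes. invoke_need_callbacks() would
 * then leave:
 *
 *	page_ext_size         = sizeof(struct page_ext) + 16
 *	page_owner_ops.offset = sizeof(struct page_ext)
 *
 * i.e. each page_ext entry starts with the shared struct page_ext (only if
 * some enabled client sets need_shared_flags), followed by every requesting
 * client's private area at its recorded offset.
 */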

static void __init invoke_init_callbacks(void)
{
	int i;
	int entries = ARRAY_SIZE(page_ext_ops);

	for (i = 0; i < entries; i++) {
		if (page_ext_ops[i]->init)
			page_ext_ops[i]->init();
	}
}

static inline struct page_ext *get_entry(void *base, unsigned long index)
{
	return base + page_ext_size * index;
}

#ifndef CONFIG_SPARSEMEM
void __init page_ext_init_flatmem_late(void)
{
	invoke_init_callbacks();
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
	pgdat->node_page_ext = NULL;
}

static struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long index;
	struct page_ext *base;

	WARN_ON_ONCE(!rcu_read_lock_held());
	base = NODE_DATA(page_to_nid(page))->node_page_ext;
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (unlikely(!base))
		return NULL;
	index = pfn - round_down(node_start_pfn(page_to_nid(page)),
				 MAX_ORDER_NR_PAGES);
	return get_entry(base, index);
}

static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * Need extra space if node range is not aligned with
	 * MAX_ORDER_NR_PAGES. When page allocator's buddy algorithm
	 * checks buddy's status, range could be out of exact node range.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
	    !IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = page_ext_size * nr_pages;

	base = memblock_alloc_try_nid(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	return 0;
}

void __init page_ext_init_flatmem(void)
{
	int nid, fail;

	if (!invoke_need_callbacks())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_ext(nid);
		if (fail)
			goto fail;
	}
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	return;

fail:
	pr_crit("allocation of page_ext failed.\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */
static bool page_ext_invalid(struct page_ext *page_ext)
{
	return !page_ext || (((unsigned long)page_ext & PAGE_EXT_INVALID) == PAGE_EXT_INVALID);
}

static struct page_ext *lookup_page_ext(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);
	struct page_ext *page_ext = READ_ONCE(section->page_ext);

	WARN_ON_ONCE(!rcu_read_lock_held());
	/*
	 * The sanity checks the page allocator does upon freeing a
	 * page can reach here before the page_ext arrays are
	 * allocated when feeding a range of pages to the allocator
	 * for the first time during bootup or memory hotplug.
	 */
	if (page_ext_invalid(page_ext))
		return NULL;
	return get_entry(page_ext, pfn);
}

static void *__meminit alloc_page_ext(size_t size, int nid)
{
	gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
	void *addr = NULL;

	addr = alloc_pages_exact_nid(nid, size, flags);
	if (addr) {
		kmemleak_alloc(addr, size, 1, flags);
		return addr;
	}

	addr = vzalloc_node(size, nid);

	return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_ext *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);

	if (section->page_ext)
		return 0;

	table_size = page_ext_size * PAGES_PER_SECTION;
	base = alloc_page_ext(table_size, nid);

	/*
	 * The value stored in section->page_ext is (base - pfn)
	 * and it does not point to the memory block allocated above,
	 * causing kmemleak false positives.
	 */
	kmemleak_not_leak(base);

	if (!base) {
		pr_err("page ext allocation failure\n");
		return -ENOMEM;
	}

	/*
	 * The passed "pfn" may not be aligned to SECTION. For the calculation
	 * we need to apply a mask.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_ext = (void *)base - page_ext_size * pfn;
	total_usage += table_size;
	return 0;
}
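
/*
 * Worked example of the pointer trick above (illustrative values only):
 * suppose PAGES_PER_SECTION is 32768, page_ext_size is 64, the section
 * covers pfns [32768, 65536) and alloc_page_ext() returned base. Then
 *
 *	section->page_ext = base - 64 * 32768
 *
 * so that, for any pfn in the section, lookup_page_ext() can simply do
 *
 *	get_entry(section->page_ext, pfn) = section->page_ext + 64 * pfn
 *	                                  = base + 64 * (pfn - 32768)
 *
 * i.e. the pfn can be used as a global index without first subtracting the
 * section's start pfn. The concrete values of PAGES_PER_SECTION and
 * page_ext_size above are assumptions for this sketch.
 */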

static void free_page_ext(void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		struct page *page = virt_to_page(addr);
		size_t table_size;

		table_size = page_ext_size * PAGES_PER_SECTION;

		BUG_ON(PageReserved(page));
		kmemleak_free(addr);
		free_pages_exact(addr, table_size);
	}
}

static void __free_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_ext *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;

	base = READ_ONCE(ms->page_ext);
	/*
	 * page_ext here can be valid while doing the roll back
	 * operation in online_page_ext().
	 */
	if (page_ext_invalid(base))
		base = (void *)base - PAGE_EXT_INVALID;
	WRITE_ONCE(ms->page_ext, NULL);

	base = get_entry(base, pfn);
	free_page_ext(base);
}

static void __invalidate_page_ext(unsigned long pfn)
{
	struct mem_section *ms;
	void *val;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_ext)
		return;
	val = (void *)ms->page_ext + PAGE_EXT_INVALID;
	WRITE_ONCE(ms->page_ext, val);
}

static int __meminit online_page_ext(unsigned long start_pfn,
				unsigned long nr_pages,
				int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	if (nid == NUMA_NO_NODE) {
		/*
		 * In this case, "nid" already exists and contains valid memory.
		 * "start_pfn" passed to us is a pfn which is an arg for
		 * online_pages(), and start_pfn should exist.
		 */
		nid = pfn_to_nid(start_pfn);
		VM_BUG_ON(!node_online(nid));
	}

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
		fail = init_section_page_ext(pfn, nid);
	if (!fail)
		return 0;

	/* rollback */
	end = pfn - PAGES_PER_SECTION;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);

	return -ENOMEM;
}

static void __meminit offline_page_ext(unsigned long start_pfn,
				unsigned long nr_pages)
{
	unsigned long start, end, pfn;

	start = SECTION_ALIGN_DOWN(start_pfn);
	end = SECTION_ALIGN_UP(start_pfn + nr_pages);

	/*
	 * Freeing of page_ext is done in 3 steps to avoid
	 * use-after-free of it:
	 * 1) Traverse all the sections and mark their page_ext
	 *    as invalid.
	 * 2) Wait for all the existing users of page_ext who
	 *    started before invalidation to finish.
	 * 3) Free the page_ext.
	 */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__invalidate_page_ext(pfn);

	synchronize_rcu();

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_ext(pfn);
}
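
/*
 * Illustrative timeline (a sketch, not extra code) of why the three steps
 * above are needed against a concurrent reader using page_ext_get():
 *
 *	reader					offliner
 *	------					--------
 *	rcu_read_lock()
 *	page_ext = lookup_page_ext(page)	__invalidate_page_ext(pfn)
 *	... uses page_ext ...			synchronize_rcu()  <- blocks
 *	rcu_read_unlock()			   until the reader is done
 *						__free_page_ext(pfn)
 *
 * A reader that starts after the invalidation instead sees PAGE_EXT_INVALID
 * (or NULL) and gets NULL from lookup_page_ext(), so it never touches memory
 * that is about to be freed.
 */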

static int __meminit page_ext_callback(struct notifier_block *self,
				       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_ext(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages);
		break;
	case MEM_CANCEL_ONLINE:
		offline_page_ext(mn->start_pfn,
				mn->nr_pages);
		break;
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	return notifier_from_errno(ret);
}

void __init page_ext_init(void)
{
	unsigned long pfn;
	int nid;

	if (!invoke_need_callbacks())
		return;

	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out-of-node pages are not initialized. So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn; pfn < end_pfn;
			pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfns can overlap.
			 * We know some archs can have a node layout such as
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 | ...
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_ext(pfn, nid))
				goto oom;
			cond_resched();
		}
	}
	hotplug_memory_notifier(page_ext_callback, DEFAULT_CALLBACK_PRI);
	pr_info("allocated %ld bytes of page_ext\n", total_usage);
	invoke_init_callbacks();
	return;

oom:
	panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif

/**
 * page_ext_get() - Get the extended information for a page.
 * @page: The page we're interested in.
 *
 * Ensures that the page_ext will remain valid until page_ext_put()
 * is called.
 *
 * Return: NULL if no page_ext exists for this page.
 * Context: Any context. Caller may not sleep until they have called
 * page_ext_put().
 */
struct page_ext *page_ext_get(struct page *page)
{
	struct page_ext *page_ext;

	rcu_read_lock();
	page_ext = lookup_page_ext(page);
	if (!page_ext) {
		rcu_read_unlock();
		return NULL;
	}

	return page_ext;
}

/**
 * page_ext_put() - Indicate that work with a page's extended information is done.
 * @page_ext: Page extended information received from page_ext_get().
 *
 * The page's extended information may no longer be valid after this
 * function is called.
 *
 * Return: None.
 * Context: Any context in which the corresponding page_ext_get() was called.
 */
void page_ext_put(struct page_ext *page_ext)
{
	if (unlikely(!page_ext))
		return;

	rcu_read_unlock();
}
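
/*
 * Usage sketch (illustrative only): how a client would typically reach its
 * private data through the accessor pair above. "foo_ops" and "struct
 * foo_data" are hypothetical names, not defined in this file:
 *
 *	struct page_ext *page_ext = page_ext_get(page);
 *	struct foo_data *data;
 *
 *	if (!page_ext)
 *		return;		// no page_ext for this page (yet)
 *	data = (void *)page_ext + foo_ops.offset;
 *	// ... read or update *data; do not sleep here ...
 *	page_ext_put(page_ext);
 *
 * The caller must not sleep between page_ext_get() and page_ext_put(),
 * since an RCU read-side critical section is held across the pair.
 */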