// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

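/*
 * One struct page_owner lives in each page's page_ext area and records
 * the allocation order, the gfp mask of the allocating call site, the
 * most recent migration reason (-1 if the page was never migrated) and
 * a stackdepot handle referencing the allocation stack trace.
 */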
struct page_owner {
	unsigned short order;
	short last_migrate_reason;
	gfp_t gfp_mask;
	depot_stack_handle_t handle;
};

static bool page_owner_disabled = true;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

static int early_page_owner_param(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		page_owner_disabled = false;

	return 0;
}
early_param("page_owner", early_page_owner_param);

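/*
 * page_owner is compiled in via CONFIG_PAGE_OWNER but stays dormant until
 * "page_owner=on" is passed on the kernel command line. A kernel booted
 * without the parameter pays only a patched-out static branch in the
 * allocator paths, and need_page_owner() below keeps the per-page
 * page_ext space from being reserved at all.
 */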
static bool need_page_owner(void)
{
	if (page_owner_disabled)
		return false;

	return true;
}

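/*
 * The stacks registered below are sentinels rather than real owner
 * stacks: dummy_handle is reported when save_stack() detects that it was
 * entered recursively, failure_handle when stackdepot cannot allocate
 * room for a new trace, and early_handle marks pages that were already
 * allocated when page_owner initialized. Each register_* wrapper is
 * noinline so the three captured traces differ and therefore hash to
 * distinct depot handles.
 */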
static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
	unsigned long entries[4];
	struct stack_trace dummy;

	dummy.nr_entries = 0;
	dummy.max_entries = ARRAY_SIZE(entries);
	dummy.entries = &entries[0];
	dummy.skip = 0;

	save_stack_trace(&dummy);
	return depot_save_stack(&dummy, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
	dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
	failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
	early_handle = create_dummy_stack();
}

static void init_page_owner(void)
{
	if (page_owner_disabled)
		return;

	register_dummy_stack();
	register_failure_stack();
	register_early_stack();
	static_branch_enable(&page_owner_inited);
	init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
	.size = sizeof(struct page_owner),
	.need = need_page_owner,
	.init = init_page_owner,
};

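/*
 * The page_ext framework sizes each page's extension area from the .size
 * fields of registered ops, calls .need to decide whether to reserve the
 * space at all, and calls .init once the extensions are usable. The
 * offset it assigns to page_owner_ops is what get_page_owner() below adds
 * to a page_ext pointer to reach that page's struct page_owner.
 */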
static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
	return (void *)page_ext + page_owner_ops.offset;
}

void __reset_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext;

	for (i = 0; i < (1 << order); i++) {
		page_ext = lookup_page_ext(page + i);
		if (unlikely(!page_ext))
			continue;
		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
	}
}

static inline bool check_recursive_alloc(struct stack_trace *trace,
					unsigned long ip)
{
	int i, count;

	if (!trace->nr_entries)
		return false;

	for (i = 0, count = 0; i < trace->nr_entries; i++) {
		if (trace->entries[i] == ip && ++count == 2)
			return true;
	}

	return false;
}

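/*
 * Worked example of the recursion this check breaks (call chain is
 * illustrative): save_stack() -> depot_save_stack() needs a new page for
 * its pool -> page allocator -> __set_page_owner() -> save_stack() again.
 * In the nested call the captured trace contains the return address into
 * __set_page_owner() (i.e. save_stack()'s _RET_IP_) twice, so bailing out
 * on the second match lets the caller fall back to dummy_handle instead
 * of looping forever.
 */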
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = PAGE_OWNER_STACK_DEPTH,
		.skip = 2
	};
	depot_stack_handle_t handle;

	save_stack_trace(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	/*
	 * We need to check for recursion here because our request to
	 * stackdepot could trigger a memory allocation to store the new
	 * entry. That allocation would reach this point and call
	 * depot_save_stack() again if we didn't catch it: stackdepot would
	 * still be short of memory, try to allocate again, and loop forever.
	 */
	if (check_recursive_alloc(&trace, _RET_IP_))
		return dummy_handle;

	handle = depot_save_stack(&trace, flags);
	if (!handle)
		handle = failure_handle;

	return handle;
}

static inline void __set_page_owner_handle(struct page_ext *page_ext,
	depot_stack_handle_t handle, unsigned int order, gfp_t gfp_mask)
{
	struct page_owner *page_owner;

	page_owner = get_page_owner(page_ext);
	page_owner->handle = handle;
	page_owner->order = order;
	page_owner->gfp_mask = gfp_mask;
	page_owner->last_migrate_reason = -1;

	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}

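/*
 * Note on trace trimming (inferred from the blame history around
 * .skip = 2 rather than stated in the code): the two skipped frames are
 * save_stack() and __set_page_owner() itself, so recorded traces begin at
 * the allocator call site instead of inside page_owner. Keeping
 * __set_page_owner() noinline makes that frame count stable.
 */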
noinline void __set_page_owner(struct page *page, unsigned int order,
					gfp_t gfp_mask)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	depot_stack_handle_t handle;

	if (unlikely(!page_ext))
		return;

	handle = save_stack(gfp_mask);
	__set_page_owner_handle(page_ext, handle, order, gfp_mask);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->last_migrate_reason = reason;
}

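/*
 * __split_page_owner() below runs when a high-order page is split into
 * base pages: the head page's recorded order is reset to 0 first, then
 * __copy_page_owner() duplicates that page_owner (stack handle and gfp
 * mask included) into every tail page, so each base page still reports
 * the original allocation site.
 */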
void __split_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;

	if (unlikely(!page_ext))
		return;

	page_owner = get_page_owner(page_ext);
	page_owner->order = 0;
	for (i = 1; i < (1 << order); i++)
		__copy_page_owner(page, page + i);
}

void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
	struct page_ext *old_ext = lookup_page_ext(oldpage);
	struct page_ext *new_ext = lookup_page_ext(newpage);
	struct page_owner *old_page_owner, *new_page_owner;

	if (unlikely(!old_ext || !new_ext))
		return;

	old_page_owner = get_page_owner(old_ext);
	new_page_owner = get_page_owner(new_ext);
	new_page_owner->order = old_page_owner->order;
	new_page_owner->gfp_mask = old_page_owner->gfp_mask;
	new_page_owner->last_migrate_reason =
		old_page_owner->last_migrate_reason;
	new_page_owner->handle = old_page_owner->handle;

	/*
	 * We don't clear the bit on the oldpage as it's going to be freed
	 * after migration. Until then, the info can be useful in case of
	 * a bug, and the overall stats will be off a bit only temporarily.
	 * Also, migrate_misplaced_transhuge_page() can still fail the
	 * migration and then we want the oldpage to retain the info. But
	 * in that case we also don't need to explicitly clear the info from
	 * the new page, which will be freed.
	 */
	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
}

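/*
 * pagetypeinfo_showmixedcount_print() below backs the page_owner part of
 * /proc/pagetypeinfo: a pageblock is counted against its own migratetype
 * as soon as it holds one allocated page whose requested migratetype
 * differs from the block's, i.e. it is a "mixed" block from the point of
 * view of anti-fragmentation.
 */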
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
				       pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };
	int pageblock_mt, page_mt;
	int i;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);
		pageblock_mt = get_pageblock_migratetype(page);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			if (PageBuddy(page)) {
				unsigned long freepage_order;

				freepage_order = page_order_unsafe(page);
				if (freepage_order < MAX_ORDER)
					pfn += (1UL << freepage_order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			page_owner = get_page_owner(page_ext);
			page_mt = gfpflags_to_migratetype(page_owner->gfp_mask);
			if (pageblock_mt != page_mt) {
				if (is_migrate_cma(pageblock_mt))
					count[MIGRATE_MOVABLE]++;
				else
					count[pageblock_mt]++;

				pfn = block_end_pfn;
				break;
			}
			pfn += (1UL << page_owner->order) - 1;
		}
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (i = 0; i < MIGRATE_TYPES; i++)
		seq_printf(m, "%12lu ", count[i]);
	seq_putc(m, '\n');
}

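/*
 * print_page_owner() below formats one record per page for the debugfs
 * reader. Roughly (values invented for illustration):
 *
 *   Page allocated via order 0, mask 0x14200ca(GFP_HIGHUSER_MOVABLE)
 *   PFN 269824 type Movable Block 527 type Movable Flags 0x...(uptodate|lru)
 *    get_page_from_freelist+0x...
 *    __alloc_pages_nodemask+0x...
 *    ...
 *
 * A record ends with a blank line; if the user buffer cannot hold a
 * whole record the function gives up with -ENOMEM rather than returning
 * a partial one.
 */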
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_owner *page_owner,
		depot_stack_handle_t handle)
{
	int ret;
	int pageblock_mt, page_mt;
	char *kbuf;
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = PAGE_OWNER_STACK_DEPTH,
		.skip = 0
	};

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask %#x(%pGg)\n",
			page_owner->order, page_owner->gfp_mask,
			&page_owner->gfp_mask);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pageblock_migratetype(page);
	page_mt = gfpflags_to_migratetype(page_owner->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
			pfn,
			migratetype_names[page_mt],
			pfn >> pageblock_order,
			migratetype_names[pageblock_mt],
			page->flags, &page->flags);

	if (ret >= count)
		goto err;

	depot_fetch_stack(handle, &trace);
	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
	if (ret >= count)
		goto err;

	if (page_owner->last_migrate_reason != -1) {
		ret += snprintf(kbuf + ret, count - ret,
			"Page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
		if (ret >= count)
			goto err;
	}

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

void __dump_page_owner(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct page_owner *page_owner;
	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = PAGE_OWNER_STACK_DEPTH,
		.skip = 0
	};
	depot_stack_handle_t handle;
	gfp_t gfp_mask;
	int mt;

	if (unlikely(!page_ext)) {
		pr_alert("There is no page extension available.\n");
		return;
	}

	page_owner = get_page_owner(page_ext);
	gfp_mask = page_owner->gfp_mask;
	mt = gfpflags_to_migratetype(gfp_mask);

	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
		pr_alert("page_owner info is not active (free page?)\n");
		return;
	}

	handle = READ_ONCE(page_owner->handle);
	if (!handle) {
		pr_alert("page_owner info is not active (free page?)\n");
		return;
	}

	depot_fetch_stack(handle, &trace);
	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
	print_stack_trace(&trace, 0);

	if (page_owner->last_migrate_reason != -1)
		pr_alert("page has been migrated, last migrate reason: %s\n",
			migrate_reason_names[page_owner->last_migrate_reason]);
}

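/*
 * read_page_owner() below repurposes *ppos as a pfn cursor relative to
 * min_low_pfn rather than a byte offset: each read() call emits at most
 * one record and advances *ppos past the reported page, so a plain
 * sequential read walks every allocated page once and hits EOF at
 * max_pfn.
 */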
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;
	struct page_owner *page_owner;
	depot_stack_handle_t handle;

	if (!static_branch_unlikely(&page_owner_inited))
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);
		if (unlikely(!page_ext))
			continue;

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		page_owner = get_page_owner(page_ext);

		/*
		 * Access to page_owner->handle isn't synchronized, so be
		 * careful when reading it.
		 */
		handle = READ_ONCE(page_owner->handle);
		if (!handle)
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page,
				page_owner, handle);
	}

	return 0;
}

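/*
 * init_pages_in_zone() below handles pages that were allocated before
 * page_owner was up (boot-time allocations would otherwise have no owner
 * at all): every page that is neither free, reserved, nor already owned
 * gets tagged with the early_handle sentinel at order 0, trading a real
 * allocation stack for complete coverage in dumps.
 */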
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count = 0;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct
	 */
	for (; pfn < end_pfn; ) {
		unsigned long block_end_pfn;

		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		for (; pfn < block_end_pfn; pfn++) {
			struct page *page;
			struct page_ext *page_ext;

			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (page_zone(page) != zone)
				continue;

			/*
			 * To avoid having to grab zone->lock, be a little
			 * careful when reading buddy page order. The only
			 * danger is that we skip too much and potentially miss
			 * some early allocated pages, which is better than
			 * heavy lock contention.
			 */
			if (PageBuddy(page)) {
				unsigned long order = page_order_unsafe(page);

				if (order > 0 && order < MAX_ORDER)
					pfn += (1UL << order) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);
			if (unlikely(!page_ext))
				continue;

			/* Maybe overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			/* Found early allocated page */
			__set_page_owner_handle(page_ext, early_handle, 0, 0);
			count++;
		}
		cond_resched();
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		init_pages_in_zone(pgdat, zone);
	}
}

static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
	.read = read_page_owner,
};

static int __init pageowner_init(void)
{
	struct dentry *dentry;

	if (!static_branch_unlikely(&page_owner_inited)) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
				     NULL, &proc_page_owner_operations);

	return PTR_ERR_OR_ZERO(dentry);
}
late_initcall(pageowner_init)
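
/*
 * Typical usage (paraphrasing the kernel's page_owner documentation;
 * exact steps may differ between kernel versions): boot with
 * "page_owner=on", then
 *
 *	cat /sys/kernel/debug/page_owner > page_owner_full.txt
 *	./page_owner_sort page_owner_full.txt sorted_page_owner.txt
 *
 * where page_owner_sort is built from tools/vm/page_owner_sort.c and
 * groups identical allocation stacks by frequency.
 */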