// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/sched/clock.h>

#include "internal.h"

/*
 * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
 * to use off-stack temporary storage
 */
#define PAGE_OWNER_STACK_DEPTH (16)

struct page_owner {
        unsigned short order;
        short last_migrate_reason;
        gfp_t gfp_mask;
        depot_stack_handle_t handle;
        depot_stack_handle_t free_handle;
        u64 ts_nsec;
        u64 free_ts_nsec;
        char comm[TASK_COMM_LEN];
        pid_t pid;
        pid_t tgid;
};

static bool page_owner_enabled __initdata;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
static depot_stack_handle_t early_handle;

static void init_early_allocated_pages(void);

static int __init early_page_owner_param(char *buf)
{
        int ret = kstrtobool(buf, &page_owner_enabled);

        if (page_owner_enabled)
                stack_depot_want_early_init();

        return ret;
}
early_param("page_owner", early_page_owner_param);
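
/*
 * Usage note (editorial): page_owner is compiled in but disabled by
 * default; boot with "page_owner=on" (kstrtobool() also accepts "1" or
 * "y") to enable it. When enabled, the stack depot is asked to set up
 * early so that boot-time allocation stacks can already be recorded.
 */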

static __init bool need_page_owner(void)
{
        return page_owner_enabled;
}

static __always_inline depot_stack_handle_t create_dummy_stack(void)
{
        unsigned long entries[4];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static noinline void register_dummy_stack(void)
{
        dummy_handle = create_dummy_stack();
}

static noinline void register_failure_stack(void)
{
        failure_handle = create_dummy_stack();
}

static noinline void register_early_stack(void)
{
        early_handle = create_dummy_stack();
}

static __init void init_page_owner(void)
{
        if (!page_owner_enabled)
                return;

        register_dummy_stack();
        register_failure_stack();
        register_early_stack();
        static_branch_enable(&page_owner_inited);
        init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
        .size = sizeof(struct page_owner),
        .need = need_page_owner,
        .init = init_page_owner,
};

static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
{
        return (void *)page_ext + page_owner_ops.offset;
}
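
/*
 * Layout note (editorial): the page_ext core reserves page_owner_ops.size
 * bytes per page and fills in page_owner_ops.offset at init time, so the
 * owner record sits at a fixed offset behind each struct page_ext:
 *
 *        struct page_ext | ... other page_ext clients ... | struct page_owner
 *        ^ page_ext                                         ^ page_ext + offset
 */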

static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[PAGE_OWNER_STACK_DEPTH];
        depot_stack_handle_t handle;
        unsigned int nr_entries;

        /*
         * Avoid recursion.
         *
         * Sometimes page metadata allocation tracking requires more
         * memory to be allocated:
         * - when new stack trace is saved to stack depot
         * - when backtrace itself is calculated (ia64)
         */
        if (current->in_page_owner)
                return dummy_handle;
        current->in_page_owner = 1;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
        handle = stack_depot_save(entries, nr_entries, flags);
        if (!handle)
                handle = failure_handle;

        current->in_page_owner = 0;
        return handle;
}
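
/*
 * Note: stack_depot_save() deduplicates traces, so all allocations with an
 * identical backtrace share a single small depot_stack_handle_t instead of
 * each storing up to PAGE_OWNER_STACK_DEPTH words. The dummy and failure
 * handles registered above keep the "recursed" and "depot out of space"
 * cases distinguishable in dumps.
 */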

void __reset_page_owner(struct page *page, unsigned short order)
{
        int i;
        struct page_ext *page_ext;
        depot_stack_handle_t handle;
        struct page_owner *page_owner;
        u64 free_ts_nsec = local_clock();

        page_ext = page_ext_get(page);
        if (unlikely(!page_ext))
                return;

        handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
        for (i = 0; i < (1 << order); i++) {
                __clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
                page_owner = get_page_owner(page_ext);
                page_owner->free_handle = handle;
                page_owner->free_ts_nsec = free_ts_nsec;
                page_ext = page_ext_next(page_ext);
        }
        page_ext_put(page_ext);
}
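
/*
 * The free path passes GFP_NOWAIT | __GFP_NOWARN above because pages may
 * be freed from atomic context: if the stack depot cannot record the trace
 * without allocating, save_stack() falls back to failure_handle rather
 * than sleeping.
 */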

static inline void __set_page_owner_handle(struct page_ext *page_ext,
                                        depot_stack_handle_t handle,
                                        unsigned short order, gfp_t gfp_mask)
{
        struct page_owner *page_owner;
        int i;

        for (i = 0; i < (1 << order); i++) {
                page_owner = get_page_owner(page_ext);
                page_owner->handle = handle;
                page_owner->order = order;
                page_owner->gfp_mask = gfp_mask;
                page_owner->last_migrate_reason = -1;
                page_owner->pid = current->pid;
                page_owner->tgid = current->tgid;
                page_owner->ts_nsec = local_clock();
                strscpy(page_owner->comm, current->comm,
                        sizeof(page_owner->comm));
                __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
                __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);

                page_ext = page_ext_next(page_ext);
        }
}
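
/*
 * Every base page of a high-order allocation gets its own copy of the
 * owner record via the page_ext_next() walk above; e.g. an order-3
 * allocation writes the same handle, order and timestamp into all 8
 * records, so the data survives a later split of the compound page.
 */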

noinline void __set_page_owner(struct page *page, unsigned short order,
                                        gfp_t gfp_mask)
{
        struct page_ext *page_ext;
        depot_stack_handle_t handle;

        handle = save_stack(gfp_mask);

        page_ext = page_ext_get(page);
        if (unlikely(!page_ext))
                return;
        __set_page_owner_handle(page_ext, handle, order, gfp_mask);
        page_ext_put(page_ext);
}

void __set_page_owner_migrate_reason(struct page *page, int reason)
{
        struct page_ext *page_ext = page_ext_get(page);
        struct page_owner *page_owner;

        if (unlikely(!page_ext))
                return;

        page_owner = get_page_owner(page_ext);
        page_owner->last_migrate_reason = reason;
        page_ext_put(page_ext);
}

void __split_page_owner(struct page *page, unsigned int nr)
{
        int i;
        struct page_ext *page_ext = page_ext_get(page);
        struct page_owner *page_owner;

        if (unlikely(!page_ext))
                return;

        for (i = 0; i < nr; i++) {
                page_owner = get_page_owner(page_ext);
                page_owner->order = 0;
                page_ext = page_ext_next(page_ext);
        }
        page_ext_put(page_ext);
}
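
/*
 * Example: splitting an order-9 THP ends up here with nr = 512. Each owner
 * record keeps its original allocation stack but has its order reset to 0,
 * so the resulting base pages are reported individually rather than as one
 * huge allocation (order 9 is illustrative; it is the x86-64 THP order).
 */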

void __folio_copy_owner(struct folio *newfolio, struct folio *old)
{
        struct page_ext *old_ext;
        struct page_ext *new_ext;
        struct page_owner *old_page_owner, *new_page_owner;

        old_ext = page_ext_get(&old->page);
        if (unlikely(!old_ext))
                return;

        new_ext = page_ext_get(&newfolio->page);
        if (unlikely(!new_ext)) {
                page_ext_put(old_ext);
                return;
        }

        old_page_owner = get_page_owner(old_ext);
        new_page_owner = get_page_owner(new_ext);
        new_page_owner->order = old_page_owner->order;
        new_page_owner->gfp_mask = old_page_owner->gfp_mask;
        new_page_owner->last_migrate_reason =
                old_page_owner->last_migrate_reason;
        new_page_owner->handle = old_page_owner->handle;
        new_page_owner->pid = old_page_owner->pid;
        new_page_owner->tgid = old_page_owner->tgid;
        new_page_owner->ts_nsec = old_page_owner->ts_nsec;
        new_page_owner->free_ts_nsec = old_page_owner->free_ts_nsec;
        strcpy(new_page_owner->comm, old_page_owner->comm);

        /*
         * We don't clear the bit on the old folio as it's going to be freed
         * after migration. Until then, the info can be useful in case of
         * a bug, and the overall stats will be off a bit only temporarily.
         * Also, migrate_misplaced_transhuge_page() can still fail the
         * migration and then we want the old folio to retain the info. But
         * in that case we also don't need to explicitly clear the info from
         * the new page, which will be freed.
         */
        __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
        __set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
        page_ext_put(new_ext);
        page_ext_put(old_ext);
}

void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                                       pg_data_t *pgdat, struct zone *zone)
{
        struct page *page;
        struct page_ext *page_ext;
        struct page_owner *page_owner;
        unsigned long pfn, block_end_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long count[MIGRATE_TYPES] = { 0, };
        int pageblock_mt, page_mt;
        int i;

        /* Scan block by block. First and last block may be incomplete */
        pfn = zone->zone_start_pfn;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                page = pfn_to_online_page(pfn);
                if (!page) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                pageblock_mt = get_pageblock_migratetype(page);

                for (; pfn < block_end_pfn; pfn++) {
                        /* The pageblock is online, no need to recheck. */
                        page = pfn_to_page(pfn);

                        if (page_zone(page) != zone)
                                continue;

                        if (PageBuddy(page)) {
                                unsigned long freepage_order;

                                freepage_order = buddy_order_unsafe(page);
                                if (freepage_order < MAX_ORDER)
                                        pfn += (1UL << freepage_order) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = page_ext_get(page);
                        if (unlikely(!page_ext))
                                continue;

                        if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                                goto ext_put_continue;

                        page_owner = get_page_owner(page_ext);
                        page_mt = gfp_migratetype(page_owner->gfp_mask);
                        if (pageblock_mt != page_mt) {
                                if (is_migrate_cma(pageblock_mt))
                                        count[MIGRATE_MOVABLE]++;
                                else
                                        count[pageblock_mt]++;

                                pfn = block_end_pfn;
                                page_ext_put(page_ext);
                                break;
                        }
                        pfn += (1UL << page_owner->order) - 1;
ext_put_continue:
                        page_ext_put(page_ext);
                }
        }

        /* Print counts */
        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (i = 0; i < MIGRATE_TYPES; i++)
                seq_printf(m, "%12lu ", count[i]);
        seq_putc(m, '\n');
}
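
/*
 * This feeds the "Number of mixed blocks" section of /proc/pagetypeinfo.
 * Illustrative output line (values made up, columns abbreviated):
 *
 *        Node 0, zone   Normal            1            0          243            0            0            0
 *
 * i.e. one count per migratetype of pageblocks containing at least one page
 * whose gfp migratetype differs from the block's.
 */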

/*
 * Look up the page's memcg information and print it out.
 */
static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret,
                                         struct page *page)
{
#ifdef CONFIG_MEMCG
        unsigned long memcg_data;
        struct mem_cgroup *memcg;
        bool online;
        char name[80];

        rcu_read_lock();
        memcg_data = READ_ONCE(page->memcg_data);
        if (!memcg_data)
                goto out_unlock;

        if (memcg_data & MEMCG_DATA_OBJCGS)
                ret += scnprintf(kbuf + ret, count - ret,
                                "Slab cache page\n");

        memcg = page_memcg_check(page);
        if (!memcg)
                goto out_unlock;

        online = (memcg->css.flags & CSS_ONLINE);
        cgroup_name(memcg->css.cgroup, name, sizeof(name));
        ret += scnprintf(kbuf + ret, count - ret,
                        "Charged %sto %smemcg %s\n",
                        PageMemcgKmem(page) ? "(via objcg) " : "",
                        online ? "" : "offline ",
                        name);
out_unlock:
        rcu_read_unlock();
#endif /* CONFIG_MEMCG */

        return ret;
}

static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                struct page *page, struct page_owner *page_owner,
                depot_stack_handle_t handle)
{
        int ret, pageblock_mt, page_mt;
        char *kbuf;

        count = min_t(size_t, count, PAGE_SIZE);
        kbuf = kmalloc(count, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        ret = scnprintf(kbuf, count,
                        "Page allocated via order %u, mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu ns, free_ts %llu ns\n",
                        page_owner->order, page_owner->gfp_mask,
                        &page_owner->gfp_mask, page_owner->pid,
                        page_owner->tgid, page_owner->comm,
                        page_owner->ts_nsec, page_owner->free_ts_nsec);

        /* Print information relevant to grouping pages by mobility */
        pageblock_mt = get_pageblock_migratetype(page);
        page_mt = gfp_migratetype(page_owner->gfp_mask);
        ret += scnprintf(kbuf + ret, count - ret,
                        "PFN %lu type %s Block %lu type %s Flags %pGp\n",
                        pfn,
                        migratetype_names[page_mt],
                        pfn >> pageblock_order,
                        migratetype_names[pageblock_mt],
                        &page->flags);

        ret += stack_depot_snprint(handle, kbuf + ret, count - ret, 0);
        if (ret >= count)
                goto err;

        if (page_owner->last_migrate_reason != -1) {
                ret += scnprintf(kbuf + ret, count - ret,
                        "Page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_owner->last_migrate_reason]);
        }

        ret = print_page_owner_memcg(kbuf, count, ret, page);

        ret += snprintf(kbuf + ret, count - ret, "\n");
        if (ret >= count)
                goto err;

        if (copy_to_user(buf, kbuf, ret))
                ret = -EFAULT;

        kfree(kbuf);
        return ret;

err:
        kfree(kbuf);
        return -ENOMEM;
}
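
/*
 * Illustrative record as read from /sys/kernel/debug/page_owner (values
 * made up, stack trace abbreviated; the lines follow the scnprintf()
 * formats above):
 *
 *        Page allocated via order 0, mask 0xcc0(GFP_KERNEL), pid 1, tgid 1 (swapper/0), ts 712529382 ns, free_ts 0 ns
 *        PFN 262144 type Unmovable Block 512 type Unmovable Flags 0x8000000000000(...)
 *         post_alloc_hook+0x47/0x60
 *         get_page_from_freelist+0x91/0xa0
 *         __alloc_pages+0x17d/0x340
 *
 * A free_ts of 0 means the page has not been freed since tracking began.
 */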

void __dump_page_owner(const struct page *page)
{
        struct page_ext *page_ext = page_ext_get((void *)page);
        struct page_owner *page_owner;
        depot_stack_handle_t handle;
        gfp_t gfp_mask;
        int mt;

        if (unlikely(!page_ext)) {
                pr_alert("There is no page extension available.\n");
                return;
        }

        page_owner = get_page_owner(page_ext);
        gfp_mask = page_owner->gfp_mask;
        mt = gfp_migratetype(gfp_mask);

        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
                pr_alert("page_owner info is not present (never set?)\n");
                page_ext_put(page_ext);
                return;
        }

        if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                pr_alert("page_owner tracks the page as allocated\n");
        else
                pr_alert("page_owner tracks the page as freed\n");

        pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg), pid %d, tgid %d (%s), ts %llu, free_ts %llu\n",
                 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask,
                 page_owner->pid, page_owner->tgid, page_owner->comm,
                 page_owner->ts_nsec, page_owner->free_ts_nsec);

        handle = READ_ONCE(page_owner->handle);
        if (!handle)
                pr_alert("page_owner allocation stack trace missing\n");
        else
                stack_depot_print(handle);

        handle = READ_ONCE(page_owner->free_handle);
        if (!handle) {
                pr_alert("page_owner free stack trace missing\n");
        } else {
                pr_alert("page last free stack trace:\n");
                stack_depot_print(handle);
        }

        if (page_owner->last_migrate_reason != -1)
                pr_alert("page has been migrated, last migrate reason: %s\n",
                        migrate_reason_names[page_owner->last_migrate_reason]);
        page_ext_put(page_ext);
}
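
/*
 * Editorial note: __dump_page_owner() is invoked via dump_page(), so the
 * pr_alert() lines above show up inside e.g. "BUG: Bad page state" reports,
 * pairing the page's last allocation stack with its last free stack.
 */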

static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        unsigned long pfn;
        struct page *page;
        struct page_ext *page_ext;
        struct page_owner *page_owner;
        depot_stack_handle_t handle;

        if (!static_branch_unlikely(&page_owner_inited))
                return -EINVAL;

        page = NULL;
        if (*ppos == 0)
                pfn = min_low_pfn;
        else
                pfn = *ppos;
        /* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
        while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
                pfn++;

        drain_all_pages(NULL);

        /* Find an allocated page */
        for (; pfn < max_pfn; pfn++) {
                /*
                 * A temporary on-stack copy of the page_owner data is
                 * needed because we must not sleep (via copy_to_user()
                 * or a GFP_KERNEL allocation) while the RCU read lock
                 * taken by page_ext_get() is still held.
                 */
                struct page_owner page_owner_tmp;

                /*
                 * If the new page is in a new MAX_ORDER_NR_PAGES area,
                 * validate the area as existing, skip it if not
                 */
                if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
                        pfn += MAX_ORDER_NR_PAGES - 1;
                        continue;
                }

                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        unsigned long freepage_order = buddy_order_unsafe(page);

                        if (freepage_order < MAX_ORDER)
                                pfn += (1UL << freepage_order) - 1;
                        continue;
                }

                page_ext = page_ext_get(page);
                if (unlikely(!page_ext))
                        continue;

                /*
                 * Some pages could be missed by concurrent allocation or free,
                 * because we don't hold the zone lock.
                 */
                if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                        goto ext_put_continue;

                /*
                 * Although we do have the info about past allocation of free
                 * pages, it's not relevant for current memory usage.
                 */
                if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                        goto ext_put_continue;

                page_owner = get_page_owner(page_ext);

                /*
                 * Don't print "tail" pages of high-order allocations as that
                 * would inflate the stats.
                 */
                if (!IS_ALIGNED(pfn, 1 << page_owner->order))
                        goto ext_put_continue;
                /*
                 * Access to page_owner->handle isn't synchronized, so be
                 * careful when reading it.
                 */
                handle = READ_ONCE(page_owner->handle);
                if (!handle)
                        goto ext_put_continue;

                /* Record the next PFN to read in the file offset */
                *ppos = pfn + 1;

                page_owner_tmp = *page_owner;
                page_ext_put(page_ext);
                return print_page_owner(buf, count, pfn, page,
                                &page_owner_tmp, handle);
ext_put_continue:
                page_ext_put(page_ext);
        }

        return 0;
}
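
/*
 * Read semantics worth noting: each read() returns at most one record, and
 * *ppos is (ab)used as the next PFN to scan rather than as a byte offset.
 * A typical consumer therefore just loops, as in this illustrative
 * userspace sketch:
 *
 *        int fd = open("/sys/kernel/debug/page_owner", O_RDONLY);
 *        char buf[4096];
 *        ssize_t n;
 *
 *        while ((n = read(fd, buf, sizeof(buf))) > 0)
 *                fwrite(buf, 1, n, stdout);  // one allocated page per record
 */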

static loff_t lseek_page_owner(struct file *file, loff_t offset, int orig)
{
        switch (orig) {
        case SEEK_SET:
                file->f_pos = offset;
                break;
        case SEEK_CUR:
                file->f_pos += offset;
                break;
        default:
                return -EINVAL;
        }
        return file->f_pos;
}
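
/*
 * Because f_pos holds a PFN rather than a byte offset, lseek(fd, pfn,
 * SEEK_SET) positions the next read at a specific page frame; see the
 * *ppos handling in read_page_owner() above.
 */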

static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
        unsigned long pfn = zone->zone_start_pfn;
        unsigned long end_pfn = zone_end_pfn(zone);
        unsigned long count = 0;

        /*
         * Walk the zone in pageblock_nr_pages steps. If a page block spans
         * a zone boundary, it will be double counted between zones. This does
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
                unsigned long block_end_pfn;

                if (!pfn_valid(pfn)) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }

                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);

                for (; pfn < block_end_pfn; pfn++) {
                        struct page *page = pfn_to_page(pfn);
                        struct page_ext *page_ext;

                        if (page_zone(page) != zone)
                                continue;

                        /*
                         * To avoid having to grab zone->lock, be a little
                         * careful when reading buddy page order. The only
                         * danger is that we skip too much and potentially miss
                         * some early allocated pages, which is better than
                         * heavy lock contention.
                         */
                        if (PageBuddy(page)) {
                                unsigned long order = buddy_order_unsafe(page);

                                if (order > 0 && order < MAX_ORDER)
                                        pfn += (1UL << order) - 1;
                                continue;
                        }

                        if (PageReserved(page))
                                continue;

                        page_ext = page_ext_get(page);
                        if (unlikely(!page_ext))
                                continue;

                        /* Maybe overlapping zone */
                        if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                goto ext_put_continue;

                        /* Found early allocated page */
                        __set_page_owner_handle(page_ext, early_handle,
                                                0, 0);
                        count++;
ext_put_continue:
                        page_ext_put(page_ext);
                }
                cond_resched();
        }

        pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
                pgdat->node_id, zone->name, count);
}
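
/*
 * Pages allocated before page_owner itself was initialized have no real
 * stack trace to save, so they are stamped with the shared early_handle
 * and order 0; that way they still appear in the debugfs dump instead of
 * being invisible.
 */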

static void init_zones_in_node(pg_data_t *pgdat)
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                init_pages_in_zone(pgdat, zone);
        }
}

static void init_early_allocated_pages(void)
{
        pg_data_t *pgdat;

        for_each_online_pgdat(pgdat)
                init_zones_in_node(pgdat);
}

static const struct file_operations proc_page_owner_operations = {
        .read           = read_page_owner,
        .llseek         = lseek_page_owner,
};

static int __init pageowner_init(void)
{
        if (!static_branch_unlikely(&page_owner_inited)) {
                pr_info("page_owner is disabled\n");
                return 0;
        }

        debugfs_create_file("page_owner", 0400, NULL, NULL,
                            &proc_page_owner_operations);

        return 0;
}
late_initcall(pageowner_init)
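
/*
 * End-to-end usage (editorial summary, per Documentation/mm/page_owner.rst):
 *
 *        # boot with page_owner=on, then:
 *        cat /sys/kernel/debug/page_owner > page_owner_full.txt
 *        ./page_owner_sort page_owner_full.txt sorted.txt
 *
 * where page_owner_sort (built from tools/vm/page_owner_sort.c) groups
 * identical stacks and sorts them by how many pages they account for.
 */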