// SPDX-License-Identifier: GPL-2.0
/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
#include <trace/events/mmflags.h>
#include <linux/migrate.h>
#include <linux/page_owner.h>
#include <linux/ctype.h>

#include "internal.h"
#include <trace/events/migrate.h>

/*
 * Define EM() and EMe() so that MIGRATE_REASON from trace/events/migrate.h can
 * be used to populate migrate_reason_names[].
 */
#undef EM
#undef EMe
#define EM(a, b)	b,
#define EMe(a, b)	b

const char *migrate_reason_names[MR_TYPES] = {
        MIGRATE_REASON
};
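
/*
 * Illustration only (a sketch, not part of the build): in
 * trace/events/migrate.h, MIGRATE_REASON is a list of EM()/EMe() entries
 * roughly of the form
 *
 *	#define MIGRATE_REASON				\
 *		EM( MR_COMPACTION,	"compaction")	\
 *		...					\
 *		EMe(MR_DEMOTION,	"demotion")
 *
 * so with EM(a, b) expanding to "b," and EMe(a, b) to "b", the array above
 * becomes { "compaction", ..., "demotion" }, indexed by the MR_* values.
 */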

const struct trace_print_flags pageflag_names[] = {
        __def_pageflag_names,
        {0, NULL}
};

const struct trace_print_flags gfpflag_names[] = {
        __def_gfpflag_names,
        {0, NULL}
};

const struct trace_print_flags vmaflag_names[] = {
        __def_vmaflag_names,
        {0, NULL}
};
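
/*
 * For reference (based on lib/vsprintf.c, an assumption rather than anything
 * this file depends on directly): the %pGp, %pGg and %pGv printk specifiers
 * decode flag words against pageflag_names, gfpflag_names and vmaflag_names
 * respectively, e.g.:
 *
 *	pr_warn("flags: %pGp\n", &page->flags);
 */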

static void __dump_page(struct page *page)
{
        struct folio *folio = page_folio(page);
        struct page *head = &folio->page;
        struct address_space *mapping;
        bool compound = PageCompound(page);
        /*
         * Accessing the pageblock without the zone lock. It could change to
         * "isolate" again in the meantime, but since we are just dumping the
         * state for debugging, it should be fine to accept a bit of
         * inaccuracy here due to racing.
         */
        bool page_cma = is_migrate_cma_page(page);
        int mapcount;
        char *type = "";

        if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
                /*
                 * Corrupt page, so we cannot call page_mapping. Instead, do a
                 * safe subset of the steps that page_mapping() does. Caution:
                 * this will be misleading for tail pages, PageSwapCache pages,
                 * and potentially other situations. (See the page_mapping()
                 * implementation for what's missing here.)
                 */
                unsigned long tmp = (unsigned long)page->mapping;

                if (tmp & PAGE_MAPPING_ANON)
                        mapping = NULL;
                else
                        mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
                head = page;
                folio = (struct folio *)page;
                compound = false;
        } else {
                mapping = page_mapping(page);
        }

        /*
         * Avoid VM_BUG_ON() in page_mapcount().
         * page->_mapcount space in struct page is used by sl[aou]b pages to
         * encode own info.
         */
        mapcount = PageSlab(head) ? 0 : page_mapcount(page);

        pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
                        page, page_ref_count(head), mapcount, mapping,
                        page_to_pgoff(page), page_to_pfn(page));
        if (compound) {
                pr_warn("head:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
                                head, compound_order(head),
                                folio_entire_mapcount(folio),
                                folio_nr_pages_mapped(folio),
                                atomic_read(&folio->_pincount));
        }

#ifdef CONFIG_MEMCG
        if (head->memcg_data)
                pr_warn("memcg:%lx\n", head->memcg_data);
#endif
        if (PageKsm(page))
                type = "ksm ";
        else if (PageAnon(page))
                type = "anon ";
        else if (mapping)
                dump_mapping(mapping);
        BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);

        pr_warn("%sflags: %pGp%s\n", type, &head->flags,
                page_cma ? " CMA" : "");
        print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
                        sizeof(unsigned long), page,
                        sizeof(struct page), false);
        if (head != page)
                print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
                                sizeof(unsigned long), head,
                                sizeof(struct page), false);
}

void dump_page(struct page *page, const char *reason)
{
        if (PagePoisoned(page))
                pr_warn("page:%p is uninitialized and poisoned", page);
        else
                __dump_page(page);
        if (reason)
                pr_warn("page dumped because: %s\n", reason);
        dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
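
/*
 * Typical caller (a sketch for illustration): the VM_BUG_ON_PAGE() assertion
 * in include/linux/mmdebug.h dumps the offending page with the stringified
 * condition as the reason before calling BUG(), e.g.:
 *
 *	dump_page(page, "VM_BUG_ON_PAGE(PageTail(page))");
 */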

#ifdef CONFIG_DEBUG_VM

void dump_vma(const struct vm_area_struct *vma)
{
        pr_emerg("vma %px start %px end %px mm %px\n"
                "prot %lx anon_vma %px vm_ops %px\n"
                "pgoff %lx file %px private_data %px\n"
                "flags: %#lx(%pGv)\n",
                vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
                (unsigned long)pgprot_val(vma->vm_page_prot),
                vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
                vma->vm_file, vma->vm_private_data,
                vma->vm_flags, &vma->vm_flags);
}
EXPORT_SYMBOL(dump_vma);

void dump_mm(const struct mm_struct *mm)
{
        pr_emerg("mm %px task_size %lu\n"
#ifdef CONFIG_MMU
                "get_unmapped_area %px\n"
#endif
                "mmap_base %lu mmap_legacy_base %lu\n"
                "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
                "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
                "pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
                "start_code %lx end_code %lx start_data %lx end_data %lx\n"
                "start_brk %lx brk %lx start_stack %lx\n"
                "arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
                "binfmt %px flags %lx\n"
#ifdef CONFIG_AIO
                "ioctx_table %px\n"
#endif
#ifdef CONFIG_MEMCG
                "owner %px "
#endif
                "exe_file %px\n"
#ifdef CONFIG_MMU_NOTIFIER
                "notifier_subscriptions %px\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
                "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
                "tlb_flush_pending %d\n"
                "def_flags: %#lx(%pGv)\n",

                mm, mm->task_size,
#ifdef CONFIG_MMU
                mm->get_unmapped_area,
#endif
                mm->mmap_base, mm->mmap_legacy_base,
                mm->pgd, atomic_read(&mm->mm_users),
                atomic_read(&mm->mm_count),
                mm_pgtables_bytes(mm),
                mm->map_count,
                mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
                (u64)atomic64_read(&mm->pinned_vm),
                mm->data_vm, mm->exec_vm, mm->stack_vm,
                mm->start_code, mm->end_code, mm->start_data, mm->end_data,
                mm->start_brk, mm->brk, mm->start_stack,
                mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
                mm->binfmt, mm->flags,
#ifdef CONFIG_AIO
                mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
                mm->owner,
#endif
                mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
                mm->notifier_subscriptions,
#endif
#ifdef CONFIG_NUMA_BALANCING
                mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
                atomic_read(&mm->tlb_flush_pending),
                mm->def_flags, &mm->def_flags
        );
}
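
/*
 * As with dump_page() above, these dumpers are (to the best of my reading)
 * wired into the assertions in include/linux/mmdebug.h: VM_BUG_ON_VMA()
 * calls dump_vma() and VM_BUG_ON_MM() calls dump_mm() before BUG().
 */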

static bool page_init_poisoning __read_mostly = true;

static int __init setup_vm_debug(char *str)
{
        bool __page_init_poisoning = true;

        /*
         * Calling vm_debug with no arguments is equivalent to requesting
         * to enable all debugging options we can control.
         */
        if (*str++ != '=' || !*str)
                goto out;

        __page_init_poisoning = false;
        if (*str == '-')
                goto out;

        while (*str) {
                switch (tolower(*str)) {
                case 'p':
                        __page_init_poisoning = true;
                        break;
                default:
                        pr_err("vm_debug option '%c' unknown. skipped\n",
                               *str);
                }

                str++;
        }
out:
        if (page_init_poisoning && !__page_init_poisoning)
                pr_warn("Page struct poisoning disabled by kernel command line option 'vm_debug'\n");

        page_init_poisoning = __page_init_poisoning;

        return 1;
}
__setup("vm_debug", setup_vm_debug);
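
/*
 * Examples of how the parser above interprets the boot parameter (derived
 * from the code, for illustration):
 *
 *	vm_debug	enable every option this knob controls
 *	vm_debug=-	disable them all
 *	vm_debug=P	enable struct page init poisoning only
 */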

void page_init_poison(struct page *page, size_t size)
{
        if (page_init_poisoning)
                memset(page, PAGE_POISON_PATTERN, size);
}
#endif /* CONFIG_DEBUG_VM */