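/*
 * Page owner tracking: record which call stack allocated each page so the
 * information can be dumped for debugging. Enabled with the "page_owner=on"
 * boot parameter; records are read back through the "page_owner" debugfs
 * file created in pageowner_init() below.
 */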
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
#include "internal.h"

static bool page_owner_disabled = true;
bool page_owner_inited __read_mostly;

static void init_early_allocated_pages(void);

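/* Parse the "page_owner=" early boot parameter; only "page_owner=on" enables tracking. */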
static int early_page_owner_param(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		page_owner_disabled = false;

	return 0;
}
early_param("page_owner", early_page_owner_param);

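/*
 * Callbacks for the page_ext subsystem: need_page_owner() tells page_ext
 * whether to reserve space for page owner data, and init_page_owner() runs
 * once that space exists, marking the feature ready and tagging pages that
 * were allocated before initialization.
 */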
static bool need_page_owner(void)
{
	if (page_owner_disabled)
		return false;

	return true;
}

static void init_page_owner(void)
{
	if (page_owner_disabled)
		return;

	page_owner_inited = true;
	init_early_allocated_pages();
}

struct page_ext_operations page_owner_ops = {
	.need = need_page_owner,
	.init = init_page_owner,
};

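/* Clear the owner information for every page in a freed 2^order block. */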
void __reset_page_owner(struct page *page, unsigned int order)
{
	int i;
	struct page_ext *page_ext;

	for (i = 0; i < (1 << order); i++) {
		page_ext = lookup_page_ext(page + i);
		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
	}
}

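/*
 * Record the allocation order, gfp mask and allocation call stack in the
 * page's page_ext, then mark the owner information as valid. The trace
 * skips the first few (innermost) frames so the output starts closer to
 * the allocator's caller.
 */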
void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct stack_trace trace = {
		.nr_entries = 0,
		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
		.entries = &page_ext->trace_entries[0],
		.skip = 3,
	};

	save_stack_trace(&trace);

	page_ext->order = order;
	page_ext->gfp_mask = gfp_mask;
	page_ext->nr_entries = trace.nr_entries;

	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}

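/* Return the gfp mask that was recorded when the page was allocated. */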
gfp_t __get_page_owner_gfp(struct page *page)
{
	struct page_ext *page_ext = lookup_page_ext(page);

	return page_ext->gfp_mask;
}

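/*
 * Format one page's owner record (order, gfp mask, migratetype, page flags
 * and the saved allocation stack) into a kernel buffer and copy it to
 * userspace. Returns the number of bytes written, or a negative errno on
 * failure.
 */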
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_ext *page_ext)
{
	int ret;
	int pageblock_mt, page_mt;
	char *kbuf;
	struct stack_trace trace = {
		.nr_entries = page_ext->nr_entries,
		.entries = &page_ext->trace_entries[0],
	};

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask 0x%x\n",
			page_ext->order, page_ext->gfp_mask);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pfnblock_migratetype(page, pfn);
	page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu Block %lu type %d %s Flags %s%s%s%s%s%s%s%s%s%s%s%s\n",
			pfn,
			pfn >> pageblock_order,
			pageblock_mt,
			pageblock_mt != page_mt ? "Fallback" : " ",
			PageLocked(page) ? "K" : " ",
			PageError(page) ? "E" : " ",
			PageReferenced(page) ? "R" : " ",
			PageUptodate(page) ? "U" : " ",
			PageDirty(page) ? "D" : " ",
			PageLRU(page) ? "L" : " ",
			PageActive(page) ? "A" : " ",
			PageSlab(page) ? "S" : " ",
			PageWriteback(page) ? "W" : " ",
			PageCompound(page) ? "C" : " ",
			PageSwapCache(page) ? "B" : " ",
			PageMappedToDisk(page) ? "M" : " ");

	if (ret >= count)
		goto err;

	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
	if (ret >= count)
		goto err;

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

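/*
 * debugfs read handler. The file offset is used as a PFN cursor (relative
 * to min_low_pfn): each read() scans forward for the next page that has
 * owner information recorded, prints exactly one record, and advances the
 * offset past it. Returns 0 at the end of memory.
 */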
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;

	if (!page_owner_inited)
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page, page_ext);
	}

	return 0;
}

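/*
 * Walk one zone and tag every page that was allocated before page_owner
 * was initialized (no owner bit set, not free, not reserved) as an early
 * allocated page, so that it still shows up in the debugfs output.
 */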
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
	struct page *page;
	struct page_ext *page_ext;
	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
	unsigned long end_pfn = pfn + zone->spanned_pages;
	unsigned long count = 0;

	/* Scan block by block. First and last block may be incomplete */
	pfn = zone->zone_start_pfn;

	/*
	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
	 * a zone boundary, it will be double counted between zones. This does
	 * not matter as the mixed block count will still be correct.
	 */
	for (; pfn < end_pfn; ) {
		if (!pfn_valid(pfn)) {
			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
			continue;
		}

		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);

		page = pfn_to_page(pfn);

		for (; pfn < block_end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				continue;

			page = pfn_to_page(pfn);

			/*
			 * We are safe to check the buddy flag and order here,
			 * because this is the init stage and only a single
			 * thread runs.
			 */
			if (PageBuddy(page)) {
				pfn += (1UL << page_order(page)) - 1;
				continue;
			}

			if (PageReserved(page))
				continue;

			page_ext = lookup_page_ext(page);

			/* Maybe an overlapping zone */
			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
				continue;

			/* Found an early allocated page */
			set_page_owner(page, 0, 0);
			count++;
		}
	}

	pr_info("Node %d, zone %8s: page owner found early allocated %lu pages\n",
		pgdat->node_id, zone->name, count);
}

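/* Scan every populated zone of one node, holding the zone lock while doing so. */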
static void init_zones_in_node(pg_data_t *pgdat)
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		init_pages_in_zone(pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}

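/*
 * Called once from init_page_owner(): drain the per-cpu page lists so buddy
 * information is up to date, then scan all online nodes for pages that were
 * allocated before the page owner machinery was ready.
 */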
static void init_early_allocated_pages(void)
{
	pg_data_t *pgdat;

	drain_all_pages(NULL);
	for_each_online_pgdat(pgdat)
		init_zones_in_node(pgdat);
}

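/*
 * Expose the records through a root-readable "page_owner" debugfs file.
 * Registered as a late initcall, after debugfs itself has been set up.
 */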
static const struct file_operations proc_page_owner_operations = {
	.read = read_page_owner,
};

static int __init pageowner_init(void)
{
	struct dentry *dentry;

	if (!page_owner_inited) {
		pr_info("page_owner is disabled\n");
		return 0;
	}

	dentry = debugfs_create_file("page_owner", S_IRUSR, NULL,
			NULL, &proc_page_owner_operations);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
late_initcall(pageowner_init)