/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <[email protected]>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>

void __get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free, struct pglist_data *pgdat)
{
	struct zone *zones = pgdat->node_zones;
	int i;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for (i = 0; i < MAX_NR_ZONES; i++) {
		*active += zones[i].nr_active;
		*inactive += zones[i].nr_inactive;
		*free += zones[i].free_pages;
	}
}

void get_zone_counts(unsigned long *active,
		unsigned long *inactive, unsigned long *free)
{
	struct pglist_data *pgdat;

	*active = 0;
	*inactive = 0;
	*free = 0;
	for_each_online_pgdat(pgdat) {
		unsigned long l, m, n;
		__get_zone_counts(&l, &m, &n, pgdat);
		*active += l;
		*inactive += m;
		*free += n;
	}
}

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
{
	int cpu = 0;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	cpu = first_cpu(*cpumask);
	while (cpu < NR_CPUS) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		cpu = next_cpu(cpu, *cpumask);

		if (cpu < NR_CPUS)
			prefetch(&per_cpu(vm_event_states, cpu));

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	sum_vm_events(ret, &cpu_online_map);
}
EXPORT_SYMBOL_GPL(all_vm_events);
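
/*
 * Illustrative only, not part of the original file: a minimal sketch of
 * how a consumer might snapshot the aggregated event counters. The
 * function name is hypothetical; PGFAULT comes from enum vm_event_item.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
static void example_dump_pgfaults(void)
{
	unsigned long events[NR_VM_EVENT_ITEMS];

	all_vm_events(events);	/* approximate snapshot, see comment above */
	printk(KERN_DEBUG "pgfault events so far: %lu\n", events[PGFAULT]);
}
#endif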

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
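
/*
 * Illustrative note (assumption, not from this file): the page
 * allocator's CPU hotplug callback is the typical caller, e.g.
 * vm_events_fold_cpu(3) when cpu 3 goes offline, so the events accrued
 * on the dead cpu survive in the surviving cpu's per-cpu state.
 */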
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

static int calculate_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->present_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
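
/*
 * Worked example (illustrative): a 900 MB zone with 4 KB pages has
 * present_pages = 230400, so mem = 230400 >> (27 - 12) = 7. With 16
 * online CPUs, threshold = 2 * fls(16) * (1 + fls(7)) = 2 * 5 * 4 = 40,
 * matching the "40 / 16 / 900M" row in the table above.
 */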

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_zone(zone) {

		if (!zone->present_pages)
			continue;

		threshold = calculate_threshold(zone);

		for_each_online_cpu(cpu)
			zone_pcp(zone, cpu)->stat_threshold = threshold;
	}
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;
	long x;

	x = delta + *p;

	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);
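
/*
 * Illustrative usage (hypothetical caller, not part of this file): with
 * interrupts already disabled, a caller removing nr file pages from a
 * zone would issue
 *
 *	__mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -nr);
 *
 * The delta accumulates in the per-cpu vm_stat_diff and is folded into
 * the global vm_stat only once it crosses stat_threshold.
 */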

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)++;

	if (unlikely(*p > pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p + overstep, zone, item);
		*p = -overstep;
	}
}
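
/*
 * Worked example (illustrative): with stat_threshold = 32, the 33rd
 * local increment trips the check. 33 + 16 is folded into vm_stat and
 * the differential is reset to -16, so the next fold is 49 increments
 * away instead of 33. Overstepping roughly halves the fold rate for
 * counters that move monotonically.
 */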

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)--;

	if (unlikely(*p < - pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p - overstep, zone, item);
		*p = overstep;
	}
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	unsigned long flags;

	for_each_zone(zone) {
		struct per_cpu_pageset *pcp;

		if (!populated_zone(zone))
			continue;

		pcp = zone_pcp(zone, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (pcp->vm_stat_diff[i]) {
				local_irq_save(flags);
				zone_page_state_add(pcp->vm_stat_diff[i],
					zone, i);
				pcp->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
			}
	}
}

static void __refresh_cpu_vm_stats(void *dummy)
{
	refresh_cpu_vm_stats(smp_processor_id());
}

/*
 * Consolidate all counters.
 *
 * Note that the result is less inaccurate but still inaccurate
 * if concurrent processes are allowed to run.
 */
void refresh_vm_stats(void)
{
	on_each_cpu(__refresh_cpu_vm_stats, NULL, 0, 1);
}
EXPORT_SYMBOL(refresh_vm_stats);

#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z        = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zonelist *zonelist, struct zone *z)
{
	if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
	}
	if (z->node == numa_node_id())
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
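
/*
 * Illustrative scenario: a task on node 0 prefers node 0 but the page
 * is allocated from node 1. That call records NUMA_MISS on the node 1
 * zone, NUMA_FOREIGN on node 0's preferred zone, and NUMA_OTHER on the
 * node 1 zone, since the allocation is remote to the requesting cpu.
 */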
#endif

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;
	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;
	int order;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
		for (order = 0; order < MAX_ORDER; ++order)
			seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}

struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};
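
/*
 * Example line as it appears in /proc/buddyinfo (counts illustrative):
 *
 *	Node 0, zone   Normal     81     53     28     16      9      4 ...
 *
 * Column k is free_area[k].nr_free, the number of free order-k blocks.
 */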

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) xx "_dma", TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx)
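
/*
 * Illustrative expansion: on a CONFIG_ZONE_DMA32 kernel without
 * CONFIG_HIGHMEM (e.g. x86-64), TEXTS_FOR_ZONES("pgalloc") becomes
 *
 *	"pgalloc_dma", "pgalloc_dma32", "pgalloc_normal",
 *
 * keeping vmstat_text in step with the per-zone counter items.
 */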

static char *vmstat_text[] = {
	/* Zoned VM counters */
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_unstable",
	"nr_bounce",
	"nr_vmscan_write",

#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")

	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",

	"pgrotated",
#endif
};

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
		int i;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
		seq_printf(m,
			   "\n  pages free     %lu"
			   "\n        min      %lu"
			   "\n        low      %lu"
			   "\n        high     %lu"
			   "\n        active   %lu"
			   "\n        inactive %lu"
			   "\n        scanned  %lu (a: %lu i: %lu)"
			   "\n        spanned  %lu"
			   "\n        present  %lu",
			   zone->free_pages,
			   zone->pages_min,
			   zone->pages_low,
			   zone->pages_high,
			   zone->nr_active,
			   zone->nr_inactive,
			   zone->pages_scanned,
			   zone->nr_scan_active, zone->nr_scan_inactive,
			   zone->spanned_pages,
			   zone->present_pages);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
					zone_page_state(zone, i));

		seq_printf(m,
			   "\n        protection: (%lu",
			   zone->lowmem_reserve[0]);
		for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
			seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
		seq_printf(m,
			   ")"
			   "\n  pagesets");
		for_each_online_cpu(i) {
			struct per_cpu_pageset *pageset;
			int j;

			pageset = zone_pcp(zone, i);
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				if (pageset->pcp[j].count)
					break;
			}
			if (j == ARRAY_SIZE(pageset->pcp))
				continue;
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				seq_printf(m,
					   "\n    cpu: %i pcp: %i"
					   "\n              count: %i"
					   "\n              high:  %i"
					   "\n              batch: %i",
					   i, j,
					   pageset->pcp[j].count,
					   pageset->pcp[j].high,
					   pageset->pcp[j].batch);
			}
#ifdef CONFIG_SMP
			seq_printf(m, "\n  vm stats threshold: %d",
					pageset->stat_threshold);
#endif
		}
		seq_printf(m,
			   "\n  all_unreclaimable: %u"
			   "\n  prev_priority:     %i"
			   "\n  start_pfn:         %lu",
			   zone->all_unreclaimable,
			   zone->prev_priority,
			   zone->zone_start_pfn);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}

struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *e;
#endif
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(struct vm_event_state), GFP_KERNEL);
#else
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
			GFP_KERNEL);
#endif
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
	e = v + NR_VM_ZONE_STAT_ITEMS;
	all_vm_events(e);
	e[PGPGIN] /= 2;		/* sectors -> kbytes */
	e[PGPGOUT] /= 2;
#endif
	return v + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};
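
/*
 * Example /proc/vmstat output rendered by vmstat_show(), one
 * "name value" pair per line (values illustrative):
 *
 *	nr_anon_pages 8232
 *	nr_mapped 4751
 *	pgpgin 123456
 */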

#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		refresh_zone_stat_thresholds();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };

int __init setup_vmstat(void)
{
	refresh_zone_stat_thresholds();
	register_cpu_notifier(&vmstat_notifier);
	return 0;
}
module_init(setup_vmstat)
#endif