// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic show_mem() implementation
 *
 * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de>
 */

#include <linux/blkdev.h>
#include <linux/cma.h>
#include <linux/cpuset.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/swap.h>
#include <linux/vmstat.h>

#include "internal.h"
#include "swap.h"
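/*
 * System-wide page accounting: total RAM, pages reserved by the kernel,
 * and pages set aside for CMA areas.
 */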
atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;
static inline void show_node(struct zone *zone)
{
	if (IS_ENABLED(CONFIG_NUMA))
		printk("Node %d ", zone_to_nid(zone));
}
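/*
 * si_mem_available() backs the MemAvailable field in /proc/meminfo: an
 * estimate of how many pages a new workload could allocate without pushing
 * the system into swapping or reclaim thrashing.
 */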
long si_mem_available(void)
{
	long available;
	unsigned long pagecache;
	unsigned long wmark_low = 0;
	unsigned long reclaimable;
	struct zone *zone;

	for_each_zone(zone)
		wmark_low += low_wmark_pages(zone);

	/*
	 * Estimate the amount of memory available for userspace allocations,
	 * without causing swapping or OOM.
	 */
	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;

	/*
	 * Not all the page cache can be freed, otherwise the system will
	 * start swapping or thrashing. Assume at least half of the page
	 * cache, or the low watermark worth of cache, needs to stay.
	 */
	pagecache = global_node_page_state(NR_ACTIVE_FILE) +
		global_node_page_state(NR_INACTIVE_FILE);
	pagecache -= min(pagecache / 2, wmark_low);
	available += pagecache;

	/*
	 * Part of the reclaimable slab and other kernel memory consists of
	 * items that are in use, and cannot be freed. Cap this estimate at the
	 * low watermark.
	 */
	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
	reclaimable -= min(reclaimable / 2, wmark_low);
	available += reclaimable;

	if (available < 0)
		available = 0;
	return available;
}
EXPORT_SYMBOL_GPL(si_mem_available);
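/*
 * Fill *val with system-wide memory totals for the sysinfo(2) syscall;
 * the counts are expressed in units of val->mem_unit bytes.
 */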
void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages();
	val->sharedram = global_node_page_state(NR_SHMEM);
	val->freeram = global_zone_page_state(NR_FREE_PAGES);
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages();
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}

EXPORT_SYMBOL(si_meminfo);
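/*
 * Per-node counterpart of si_meminfo(); compiled only when CONFIG_NUMA is
 * enabled.
 */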
#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	int zone_type;		/* needs to be signed */
	unsigned long managed_pages = 0;
	unsigned long managed_highpages = 0;
	unsigned long free_highpages = 0;
	pg_data_t *pgdat = NODE_DATA(nid);

	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
		managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
	val->totalram = managed_pages;
	val->sharedram = node_page_state(pgdat, NR_SHMEM);
	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];

		if (is_highmem(zone)) {
			managed_highpages += zone_managed_pages(zone);
			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
		}
	}
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
#else
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
#endif
	val->mem_unit = PAGE_SIZE;
}
#endif
/*
 * Determine whether the node should be displayed or not, depending on whether
 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
 */
static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
{
	if (!(flags & SHOW_MEM_FILTER_NODES))
		return false;

	/*
	 * no node mask - aka implicit memory numa policy. Do not bother with
	 * the synchronization - read_mems_allowed_begin - because we do not
	 * have to be precise here.
	 */
	if (!nodemask)
		nodemask = &cpuset_current_mems_allowed;

	return !node_isset(nid, *nodemask);
}
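/*
 * Print a one-letter legend of the migratetypes set in @type, e.g. "(UM) "
 * when both unmovable and movable free blocks exist at an order.
 */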
static void show_migration_types(unsigned char type)
{
	static const char types[MIGRATE_TYPES] = {
		[MIGRATE_UNMOVABLE]	= 'U',
		[MIGRATE_MOVABLE]	= 'M',
		[MIGRATE_RECLAIMABLE]	= 'E',
		[MIGRATE_HIGHATOMIC]	= 'H',
#ifdef CONFIG_CMA
		[MIGRATE_CMA]		= 'C',
#endif
#ifdef CONFIG_MEMORY_ISOLATION
		[MIGRATE_ISOLATE]	= 'I',
#endif
	};
	char tmp[MIGRATE_TYPES + 1];
	char *p = tmp;
	int i;

	for (i = 0; i < MIGRATE_TYPES; i++) {
		if (type & (1 << i))
			*p++ = types[i];
	}

	*p = '\0';
	printk(KERN_CONT "(%s) ", tmp);
}
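/* True if any zone on @pgdat up to and including @max_zone_idx has managed pages. */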
static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)
{
	int zone_idx;

	for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++)
		if (zone_managed_pages(pgdat->node_zones + zone_idx))
			return true;
	return false;
}
/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 *
 * Bits in @filter:
 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
 *   cpuset.
 */
static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
	unsigned long free_pcp = 0;
	int cpu, nid;
	struct zone *zone;
	pg_data_t *pgdat;

	for_each_populated_zone(zone) {
		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
	}

	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
		" unevictable:%lu dirty:%lu writeback:%lu\n"
		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
		" mapped:%lu shmem:%lu pagetables:%lu\n"
		" sec_pagetables:%lu bounce:%lu\n"
		" kernel_misc_reclaimable:%lu\n"
		" free:%lu free_pcp:%lu free_cma:%lu\n",
		global_node_page_state(NR_ACTIVE_ANON),
		global_node_page_state(NR_INACTIVE_ANON),
		global_node_page_state(NR_ISOLATED_ANON),
		global_node_page_state(NR_ACTIVE_FILE),
		global_node_page_state(NR_INACTIVE_FILE),
		global_node_page_state(NR_ISOLATED_FILE),
		global_node_page_state(NR_UNEVICTABLE),
		global_node_page_state(NR_FILE_DIRTY),
		global_node_page_state(NR_WRITEBACK),
		global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
		global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
		global_node_page_state(NR_FILE_MAPPED),
		global_node_page_state(NR_SHMEM),
		global_node_page_state(NR_PAGETABLE),
		global_node_page_state(NR_SECONDARY_PAGETABLE),
		global_zone_page_state(NR_BOUNCE),
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
		global_zone_page_state(NR_FREE_PAGES),
		free_pcp,
		global_zone_page_state(NR_FREE_CMA_PAGES));
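	/* Per-node statistics; K() converts page counts to kilobytes. */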
	for_each_online_pgdat(pgdat) {
		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
			continue;
		if (!node_has_managed_zones(pgdat, max_zone_idx))
			continue;

		printk("Node %d"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" isolated(anon):%lukB"
			" isolated(file):%lukB"
			" mapped:%lukB"
			" dirty:%lukB"
			" writeback:%lukB"
			" shmem:%lukB"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			" shmem_thp:%lukB"
			" shmem_pmdmapped:%lukB"
			" anon_thp:%lukB"
#endif
			" writeback_tmp:%lukB"
			" kernel_stack:%lukB"
#ifdef CONFIG_SHADOW_CALL_STACK
			" shadow_call_stack:%lukB"
#endif
			" pagetables:%lukB"
			" sec_pagetables:%lukB"
			" all_unreclaimable? %s"
			"\n",
			pgdat->node_id,
			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			K(node_page_state(pgdat, NR_UNEVICTABLE)),
			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
			K(node_page_state(pgdat, NR_FILE_MAPPED)),
			K(node_page_state(pgdat, NR_FILE_DIRTY)),
			K(node_page_state(pgdat, NR_WRITEBACK)),
			K(node_page_state(pgdat, NR_SHMEM)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			K(node_page_state(pgdat, NR_SHMEM_THPS)),
			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
			K(node_page_state(pgdat, NR_ANON_THPS)),
#endif
			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
			node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
			K(node_page_state(pgdat, NR_PAGETABLE)),
			K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
				"yes" : "no");
	}
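	/* Per-zone statistics: free pages, watermarks and per-CPU pagelist counts. */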
	for_each_populated_zone(zone) {
		int i;

		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		free_pcp = 0;
		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;

		show_node(zone);
		printk(KERN_CONT
			"%s"
			" free:%lukB"
			" boost:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" reserved_highatomic:%luKB"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" writepending:%lukB"
			" present:%lukB"
			" managed:%lukB"
			" mlocked:%lukB"
			" bounce:%lukB"
			" free_pcp:%lukB"
			" local_pcp:%ukB"
			" free_cma:%lukB"
			"\n",
			zone->name,
			K(zone_page_state(zone, NR_FREE_PAGES)),
			K(zone->watermark_boost),
			K(min_wmark_pages(zone)),
			K(low_wmark_pages(zone)),
			K(high_wmark_pages(zone)),
			K(zone->nr_reserved_highatomic),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
			K(zone->present_pages),
			K(zone_managed_pages(zone)),
			K(zone_page_state(zone, NR_MLOCK)),
			K(zone_page_state(zone, NR_BOUNCE)),
			K(free_pcp),
			K(this_cpu_read(zone->per_cpu_pageset->count)),
			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
		printk(KERN_CONT "\n");
	}
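	/*
	 * Buddy free-list dump: for each zone, the number of free blocks at
	 * every order plus a legend of the migratetypes present.
	 */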
	for_each_populated_zone(zone) {
		unsigned int order;
		unsigned long nr[MAX_ORDER + 1], flags, total = 0;
		unsigned char types[MAX_ORDER + 1];

		if (zone_idx(zone) > max_zone_idx)
			continue;
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;
		show_node(zone);
		printk(KERN_CONT "%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order <= MAX_ORDER; order++) {
			struct free_area *area = &zone->free_area[order];
			int type;

			nr[order] = area->nr_free;
			total += nr[order] << order;

			types[order] = 0;
			for (type = 0; type < MIGRATE_TYPES; type++) {
				if (!free_area_empty(area, type))
					types[order] |= 1 << type;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order <= MAX_ORDER; order++) {
			printk(KERN_CONT "%lu*%lukB ",
			       nr[order], K(1UL) << order);
			if (nr[order])
				show_migration_types(types[order]);
		}
		printk(KERN_CONT "= %lukB\n", K(total));
	}
	for_each_online_node(nid) {
		if (show_mem_node_skip(filter, nid, nodemask))
			continue;
		hugetlb_show_meminfo_node(nid);
	}

	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}
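/*
 * __show_mem() is the workhorse behind show_mem(): it prints the per-node,
 * per-zone and buddy-list summary above, followed by overall RAM, reserved,
 * CMA and hwpoisoned page counts.
 */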
void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
	unsigned long total = 0, reserved = 0, highmem = 0;
	struct zone *zone;

	printk("Mem-Info:\n");
	show_free_areas(filter, nodemask, max_zone_idx);

	for_each_populated_zone(zone) {

		total += zone->present_pages;
		reserved += zone->present_pages - zone_managed_pages(zone);

		if (is_highmem(zone))
			highmem += zone->present_pages;
	}

	printk("%lu pages RAM\n", total);
	printk("%lu pages HighMem/MovableOnly\n", highmem);
	printk("%lu pages reserved\n", reserved);
#ifdef CONFIG_CMA
	printk("%lu pages cma reserved\n", totalcma_pages);
#endif
#ifdef CONFIG_MEMORY_FAILURE
	printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages));
#endif
}