// SPDX-License-Identifier: GPL-2.0
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/huge_mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/kernel-page-flags.h>
#include <linux/uaccess.h>
#include "internal.h"
#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
#define KPMBITS (KPMSIZE * BITS_PER_BYTE)
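/*
 * Illustrative note (not part of the upstream source): each file below is
 * a flat array of u64 records, one per pfn, so the entry for a given pfn
 * lives at byte offset pfn * KPMSIZE, i.e. pfn * 8. A reader that wants
 * pfn 0x1000 would therefore pread() 8 bytes at offset 0x8000.
 */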
static inline unsigned long get_max_dump_pfn(void)
{
#ifdef CONFIG_SPARSEMEM
	/*
	 * The memmap of early sections is completely populated and marked
	 * online even if max_pfn does not fall on a section boundary -
	 * pfn_to_online_page() will succeed on all pages. Allow inspecting
	 * these memmaps.
	 */
	return round_up(max_pfn, PAGES_PER_SECTION);
#else
	return max_pfn;
#endif
}
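/*
 * Worked example (assuming x86-64 defaults, where a sparsemem section is
 * 128 MiB, i.e. PAGES_PER_SECTION == 32768 with 4 KiB pages): a machine
 * whose last page is max_pfn = 0x10802a rounds up to the next section
 * boundary, 0x110000, so the files below expose slightly more entries
 * than there are present pages; the tail entries read back as "no page".
 */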
/* /proc/kpagecount - an array exposing page mapcounts
 *
 * Each entry is a u64 representing the corresponding
 * physical page mapcount.
 * (A userspace usage sketch follows kpagecount_proc_ops below.)
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		struct page *page;
		u64 mapcount = 0;

		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		page = pfn_to_online_page(pfn);
		if (page)
			mapcount = folio_precise_page_mapcount(page_folio(page),
							       page);

		if (put_user(mapcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}
static const struct proc_ops kpagecount_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecount_read,
};
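/*
 * Usage sketch (userspace, not kernel code): a minimal example with a
 * hypothetical helper name, assuming the caller has the root access the
 * S_IRUSR mode requires; error handling is mostly elided:
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	static uint64_t mapcount_of(unsigned long pfn)
 *	{
 *		uint64_t cnt = 0;
 *		int fd = open("/proc/kpagecount", O_RDONLY);
 *
 *		if (fd >= 0) {
 *			// one 8-byte entry per pfn; the offset and length
 *			// must stay 8-byte aligned or the read fails
 *			// with EINVAL (see the KPMMASK check above)
 *			pread(fd, &cnt, sizeof(cnt), pfn * sizeof(cnt));
 *			close(fd);
 *		}
 *		return cnt;
 *	}
 */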
/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 * (A decode sketch follows kpageflags_proc_ops below.)
 */

static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}
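/*
 * Worked example: kpf_copy_bit(k, KPF_DIRTY, PG_dirty) isolates the
 * kernel-internal PG_dirty bit in k and re-emits it at the stable,
 * userspace-visible KPF_DIRTY position (bit 4), so the exported layout
 * stays fixed even when the kernel's internal page-flag layout changes.
 */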
u64 stable_page_flags(const struct page *page)
{
	const struct folio *folio;
	unsigned long k;
	unsigned long mapping;
	bool is_anon;
	u64 u = 0;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;
	folio = page_folio(page);

	k = folio->flags;
	mapping = (unsigned long)folio->mapping;
	is_anon = mapping & PAGE_MAPPING_ANON;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 */
	if (page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (is_anon) {
		u |= 1 << KPF_ANON;
		if (mapping & PAGE_MAPPING_KSM)
			u |= 1 << KPF_KSM;
	}

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 * (see the worked example after this function)
	 */
	if (page == &folio->page)
		u |= kpf_copy_bit(k, KPF_COMPOUND_HEAD, PG_head);
	else
		u |= 1 << KPF_COMPOUND_TAIL;
	if (folio_test_hugetlb(folio))
		u |= 1 << KPF_HUGE;
	else if (folio_test_large(folio) &&
		 folio_test_large_rmappable(folio)) {
		/* Note: we indicate any THPs here, not just PMD-sized ones */
		u |= 1 << KPF_THP;
	} else if (is_huge_zero_folio(folio)) {
		u |= 1 << KPF_ZERO_PAGE;
		u |= 1 << KPF_THP;
	} else if (is_zero_folio(folio)) {
		u |= 1 << KPF_ZERO_PAGE;
	}

	/*
	 * Caveats on high order pages: PG_buddy and PG_slab will only be set
	 * on the head page.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;
	else if (page_count(page) == 0 && is_free_buddy_page(page))
		u |= 1 << KPF_BUDDY;

	if (PageOffline(page))
		u |= 1 << KPF_OFFLINE;
	if (PageTable(page))
		u |= 1 << KPF_PGTABLE;
	if (folio_test_slab(folio))
		u |= 1 << KPF_SLAB;

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	u |= kpf_copy_bit(k, KPF_IDLE,		PG_idle);
#else
	if (folio_test_idle(folio))
		u |= 1 << KPF_IDLE;
#endif

	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);
	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE,	PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK,	PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU,		PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED,	PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);

#define SWAPCACHE ((1 << PG_swapbacked) | (1 << PG_swapcache))
	if ((k & SWAPCACHE) == SWAPCACHE)
		u |= 1 << KPF_SWAPCACHE;
	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED,	PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	if (u & (1 << KPF_HUGE))
		u |= kpf_copy_bit(k, KPF_HWPOISON,	PG_hwpoison);
	else
		u |= kpf_copy_bit(page->flags, KPF_HWPOISON,	PG_hwpoison);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED,	PG_reserved);
	u |= kpf_copy_bit(k, KPF_OWNER_2,	PG_owner_2);
	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);
#ifdef CONFIG_ARCH_USES_PG_ARCH_2
	u |= kpf_copy_bit(k, KPF_ARCH_2,	PG_arch_2);
#endif
#ifdef CONFIG_ARCH_USES_PG_ARCH_3
	u |= kpf_copy_bit(k, KPF_ARCH_3,	PG_arch_3);
#endif

	return u;
}
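/*
 * Example of the head/tail encoding above (assuming a PMD-sized THP with
 * 4 KiB base pages, i.e. 512 subpages): the folio starting at pfn P yields
 * KPF_COMPOUND_HEAD | KPF_THP at entry P and KPF_COMPOUND_TAIL | KPF_THP
 * at entries P+1 .. P+511, so a scanner can recover both the extent and
 * the order of the compound page from the flags stream alone.
 */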
static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		struct page *page = pfn_to_online_page(pfn);

		if (put_user(stable_page_flags(page), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}
static const struct proc_ops kpageflags_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpageflags_read,
};
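/*
 * Decode sketch (userspace, a minimal example assuming fd is an open
 * descriptor for /proc/kpageflags and that <linux/kernel-page-flags.h>
 * supplies the KPF_* numbering):
 *
 *	uint64_t flags;
 *
 *	pread(fd, &flags, sizeof(flags), pfn * sizeof(flags));
 *	if (flags & (1ULL << KPF_NOPAGE))
 *		;	// memory hole: no initialized memmap at this pfn
 *	else if (flags & (1ULL << KPF_THP))
 *		;	// part of a transparent huge page
 *
 * The page-types utility in the kernel tree (tools/mm/page-types.c) is
 * the reference consumer of this interface.
 */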
#ifdef CONFIG_MEMCG
static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	const unsigned long max_dump_pfn = get_max_dump_pfn();
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 ino;

	pfn = src / KPMSIZE;
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;
	if (src >= max_dump_pfn * KPMSIZE)
		return 0;
	count = min_t(unsigned long, count, (max_dump_pfn * KPMSIZE) - src);

	while (count > 0) {
		/*
		 * TODO: ZONE_DEVICE support requires to identify
		 * memmaps that were actually initialized.
		 */
		ppage = pfn_to_online_page(pfn);

		if (ppage)
			ino = page_cgroup_ino(ppage);
		else
			ino = 0;

		if (put_user(ino, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;

		cond_resched();
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}
static const struct proc_ops kpagecgroup_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_lseek	= mem_lseek,
	.proc_read	= kpagecgroup_read,
};
#endif /* CONFIG_MEMCG */
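/*
 * Usage note (an illustrative pairing, not defined by this file): each
 * kpagecgroup entry is the inode number of the memory cgroup the page is
 * charged to (0 for uncharged pages) and can be matched against the
 * st_ino of a cgroup directory, e.g.:
 *
 *	struct stat st;
 *
 *	stat("/sys/fs/cgroup/mygroup", &st);
 *	// the page is charged to mygroup when the entry == st.st_ino
 */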
static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &kpagecount_proc_ops);
	proc_create("kpageflags", S_IRUSR, NULL, &kpageflags_proc_ops);
#ifdef CONFIG_MEMCG
	proc_create("kpagecgroup", S_IRUSR, NULL, &kpagecgroup_proc_ops);
#endif
	return 0;
}
fs_initcall(proc_page_init);