#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/hugetlb.h>
#include <linux/kernel-page-flags.h>
#include <asm/uaccess.h>
#include "internal.h"

#define KPMSIZE sizeof(u64)
#define KPMMASK (KPMSIZE - 1)
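
/*
 * Both /proc files below export an array of u64s indexed by page frame
 * number: the entry for PFN N lives at byte offset N * KPMSIZE.  KPMMASK
 * is used to reject reads whose offset or length is not a multiple of
 * the entry size.
 */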

/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		else
			ppage = NULL;
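		/* A hole in the physical address space reads back as 0. */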
		if (!ppage)
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;
	}

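	/* Advance the file position past the entries actually copied out. */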
	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct file_operations proc_kpagecount_operations = {
	.llseek = mem_lseek,
	.read = kpagecount_read,
};

/* /proc/kpageflags - an array exposing page flags
 *
 * Each entry is a u64 representing the corresponding
 * physical page flags.
 */

static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
{
	return ((kflags >> kbit) & 1) << ubit;
}

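/*
 * Translate the kernel-internal page->flags word (PG_* bits, whose layout
 * may change between kernel versions) into the stable KPF_* bit positions
 * from <linux/kernel-page-flags.h> that userspace sees in /proc/kpageflags.
 */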
u64 stable_page_flags(struct page *page)
{
	u64 k;
	u64 u;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;

	k = page->flags;
	u = 0;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 *
	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
	 * simple test in page_mapped() is not enough.
	 */
	if (!PageSlab(page) && page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;
	if (PageKsm(page))
		u |= 1 << KPF_KSM;

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (PageHead(page))
		u |= 1 << KPF_COMPOUND_HEAD;
	if (PageTail(page))
		u |= 1 << KPF_COMPOUND_TAIL;
	if (PageHuge(page))
		u |= 1 << KPF_HUGE;

	u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);

	/*
	 * Caveats on high order pages:
	 * PG_buddy will only be set on the head page; SLUB/SLQB do the same
	 * for PG_slab; SLOB won't set PG_slab at all on compound pages.
	 */
	u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);
	u |= kpf_copy_bit(k, KPF_BUDDY, PG_buddy);

	u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU, PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED, PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim);

	u |= kpf_copy_bit(k, KPF_SWAPCACHE, PG_swapcache);
	u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE, PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
#endif

#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
	u |= kpf_copy_bit(k, KPF_UNCACHED, PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED, PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK, PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE, PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2, PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE, PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH, PG_arch_1);

	return u;
}

static ssize_t kpageflags_read(struct file *file, char __user *buf,
			       size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	ssize_t ret = 0;

	pfn = src / KPMSIZE;
	count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

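	/* Same walk as kpagecount_read(), but each entry is the flags word. */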
	while (count > 0) {
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		else
			ppage = NULL;

		if (put_user(stable_page_flags(ppage), out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}

static const struct file_operations proc_kpageflags_operations = {
	.llseek = mem_lseek,
	.read = kpageflags_read,
};

static int __init proc_page_init(void)
{
	proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
	proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
	return 0;
}
module_init(proc_page_init);
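
/*
 * Usage sketch (illustrative only, not built as part of the kernel): a
 * minimal userspace reader for the two files created above.  Both are
 * arrays of u64s indexed by PFN, so the entry for page frame N is read
 * at byte offset N * 8, and both are root-only (S_IRUSR).  See
 * Documentation/vm/pagemap.txt for the full interface description.
 * Error handling is kept minimal for brevity.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		uint64_t pfn, count = 0, flags = 0;
 *		int fd;
 *
 *		if (argc < 2)
 *			return 1;
 *		pfn = strtoull(argv[1], NULL, 0);
 *
 *		fd = open("/proc/kpagecount", O_RDONLY);
 *		if (fd >= 0) {
 *			pread(fd, &count, sizeof(count), pfn * sizeof(uint64_t));
 *			close(fd);
 *		}
 *
 *		fd = open("/proc/kpageflags", O_RDONLY);
 *		if (fd >= 0) {
 *			pread(fd, &flags, sizeof(flags), pfn * sizeof(uint64_t));
 *			close(fd);
 *		}
 *
 *		printf("pfn %llu: mapcount %llu flags %#llx\n",
 *		       (unsigned long long)pfn,
 *		       (unsigned long long)count,
 *		       (unsigned long long)flags);
 *		return 0;
 *	}
 *
 * Build with an ordinary userspace toolchain (file name arbitrary), e.g.
 * "gcc -o kpage kpage.c", and run as root: "./kpage <pfn>".
 */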