// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * Derived from x86 implementation:
 * (C) Copyright 2008 Intel Corporation
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/seq_file.h>

#include <asm/domain.h>
#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/ptdump.h>
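
/*
 * Fixed virtual-address boundaries of interest. The walker emits a
 * "---[ name ]---" header each time it crosses one of these markers.
 * The vmalloc() Area entry is patched at init time because
 * VMALLOC_START is not a compile-time constant on ARM.
 */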
static struct addr_marker address_markers[] = {
	{ MODULES_VADDR,	"Modules" },
	{ PAGE_OFFSET,		"Kernel Mapping" },
	{ 0,			"vmalloc() Area" },
	{ VMALLOC_END,		"vmalloc() End" },
	{ FIXADDR_START,	"Fixmap Area" },
	{ VECTORS_BASE,		"Vectors" },
	{ VECTORS_BASE + PAGE_SIZE * 2,	"Vectors End" },
	{ -1,			NULL },
};
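
/*
 * Print helpers that stay quiet when no seq_file is supplied, so the
 * same walkers can drive both the debugfs dump and the silent W+X scan.
 */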
#define pt_dump_seq_printf(m, fmt, args...) \
({						\
	if (m)					\
		seq_printf(m, fmt, ##args);	\
})

#define pt_dump_seq_puts(m, fmt)	\
({					\
	if (m)				\
		seq_printf(m, fmt);	\
})
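
/*
 * State carried across one walk: the attributes of the contiguous range
 * being accumulated, plus W+X accounting when check_wx is set.
 */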
struct pg_state {
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned level;
	u64 current_prot;
	bool check_wx;
	unsigned long wx_pages;
	const char *current_domain;
};

struct prot_bits {
	u64 mask;
	u64 val;
	const char *set;
	const char *clear;
	bool ro_bit;
	bool nx_bit;
};
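
/*
 * Decode table for PTE-level descriptors: each entry names the string
 * printed when (prot & mask) == val. ro_bit/nx_bit tag the entries the
 * W+X check relies on.
 */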
static const struct prot_bits pte_bits[] = {
	{
		.mask = L_PTE_USER,
		.val = L_PTE_USER,
		.set = "USR",
		.clear = "   ",
	}, {
		.mask = L_PTE_RDONLY,
		.val = L_PTE_RDONLY,
		.set = "ro",
		.clear = "RW",
		.ro_bit = true,
	}, {
		.mask = L_PTE_XN,
		.val = L_PTE_XN,
		.set = "NX",
		.clear = "x ",
		.nx_bit = true,
	}, {
		.mask = L_PTE_SHARED,
		.val = L_PTE_SHARED,
		.set = "SHD",
		.clear = "   ",
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_UNCACHED,
		.set = "SO/UNCACHED",
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_BUFFERABLE,
		.set = "MEM/BUFFERABLE/WC",
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_WRITETHROUGH,
		.set = "MEM/CACHED/WT",
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_WRITEBACK,
		.set = "MEM/CACHED/WBRA",
#ifndef CONFIG_ARM_LPAE
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_MINICACHE,
		.set = "MEM/MINICACHE",
#endif
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_WRITEALLOC,
		.set = "MEM/CACHED/WBWA",
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_DEV_SHARED,
		.set = "DEV/SHARED",
#ifndef CONFIG_ARM_LPAE
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_DEV_NONSHARED,
		.set = "DEV/NONSHARED",
#endif
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_DEV_WC,
		.set = "DEV/WC",
	}, {
		.mask = L_PTE_MT_MASK,
		.val = L_PTE_MT_DEV_CACHED,
		.set = "DEV/CACHED",
	},
};
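
/*
 * Decode table for section (PMD-level) descriptors. The access
 * permission encoding differs between LPAE, ARMv6+ and ARMv4/v5,
 * hence the three variants.
 */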
static const struct prot_bits section_bits[] = {
#ifdef CONFIG_ARM_LPAE
	{
		.mask = PMD_SECT_USER,
		.val = PMD_SECT_USER,
		.set = "USR",
	}, {
		.mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
		.val = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
		.set = "ro",
		.clear = "RW",
		.ro_bit = true,
#elif __LINUX_ARM_ARCH__ >= 6
	{
		.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val = PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.set = "    ro",
		.ro_bit = true,
	}, {
		.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val = PMD_SECT_AP_WRITE,
		.set = "USR ro",
	}, {
		.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val = PMD_SECT_AP_READ,
		.set = "    RW",
	}, {
		.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.set = "USR RW",
#else /* ARMv4/ARMv5 */
	/* These are approximate */
	{
		.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val = 0,
		.set = "    ro",
		.ro_bit = true,
	}, {
		.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val = PMD_SECT_AP_WRITE,
		.set = "USR ro",
	}, {
		.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val = PMD_SECT_AP_READ,
		.set = "    RW",
	}, {
		.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.set = "USR RW",
#endif
	}, {
		.mask = PMD_SECT_XN,
		.val = PMD_SECT_XN,
		.set = "NX",
		.clear = "x ",
		.nx_bit = true,
	}, {
		.mask = PMD_SECT_S,
		.val = PMD_SECT_S,
		.set = "SHD",
		.clear = "   ",
	},
};
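
/*
 * Per-level decode information. Only the PMD (section) and PTE levels
 * carry protection bits to decode; mask, ro_bit and nx_bit are filled
 * in by ptdump_initialize().
 */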
struct pg_level {
	const struct prot_bits *bits;
	size_t num;
	u64 mask;
	const struct prot_bits *ro_bit;
	const struct prot_bits *nx_bit;
};

static struct pg_level pg_level[] = {
	{
	}, { /* pgd */
	}, { /* p4d */
	}, { /* pud */
	}, { /* pmd */
		.bits = section_bits,
		.num = ARRAY_SIZE(section_bits),
	}, { /* pte */
		.bits = pte_bits,
		.num = ARRAY_SIZE(pte_bits),
	},
};
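
/* Print the decoded attribute strings for the current range. */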
static void dump_prot(struct pg_state *st, const struct prot_bits *bits, size_t num)
{
	unsigned i;

	for (i = 0; i < num; i++, bits++) {
		const char *s;

		if ((st->current_prot & bits->mask) == bits->val)
			s = bits->set;
		else
			s = bits->clear;

		if (s)
			pt_dump_seq_printf(st->seq, " %s", s);
	}
}
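
/*
 * Flag a range that is both writable and executable: a mapping is
 * insecure if it is neither read-only nor non-executable.
 */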
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
	if (!st->check_wx)
		return;
	if ((st->current_prot & pg_level[st->level].ro_bit->mask) ==
				pg_level[st->level].ro_bit->val)
		return;
	if ((st->current_prot & pg_level[st->level].nx_bit->mask) ==
				pg_level[st->level].nx_bit->val)
		return;

	WARN_ONCE(1, "arm/mm: Found insecure W+X mapping at address %pS\n",
		  (void *)st->start_address);

	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}
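
/*
 * Called once per entry at every level. Entries with identical
 * attributes are coalesced into a single output line; a line is
 * flushed whenever the protection bits, level or domain change, or a
 * marker boundary is crossed.
 */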
static void note_page(struct pg_state *st, unsigned long addr,
		      unsigned int level, u64 val, const char *domain)
{
	static const char units[] = "KMGTPE";
	u64 prot = val & pg_level[level].mask;

	if (!st->level) {
		st->level = level;
		st->current_prot = prot;
		st->current_domain = domain;
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	} else if (prot != st->current_prot || level != st->level ||
		   domain != st->current_domain ||
		   addr >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;

		if (st->current_prot) {
			note_prot_wx(st, addr);
			pt_dump_seq_printf(st->seq, "0x%08lx-0x%08lx   ",
					   st->start_address, addr);

			delta = (addr - st->start_address) >> 10;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
			if (st->current_domain)
				pt_dump_seq_printf(st->seq, " %s",
						   st->current_domain);
			if (pg_level[st->level].bits)
				dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num);
			pt_dump_seq_printf(st->seq, "\n");
		}

		if (addr >= st->marker[1].start_address) {
			st->marker++;
			pt_dump_seq_printf(st->seq, "---[ %s ]---\n",
					   st->marker->name);
		}
		st->start_address = addr;
		st->current_prot = prot;
		st->current_domain = domain;
		st->level = level;
	}
}
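
/* Walk the PTEs of one page table, one PAGE_SIZE entry at a time. */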
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start,
		     const char *domain)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		addr = start + i * PAGE_SIZE;
		note_page(st, addr, 5, pte_val(*pte), domain);
	}
}
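
/* Domains exist only in the classic (non-LPAE) short-descriptor format. */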
static const char *get_domain_name(pmd_t *pmd)
{
#ifndef CONFIG_ARM_LPAE
	switch (pmd_val(*pmd) & PMD_DOMAIN_MASK) {
	case PMD_DOMAIN(DOMAIN_KERNEL):
		return "KERNEL ";
	case PMD_DOMAIN(DOMAIN_USER):
		return "USER   ";
	case PMD_DOMAIN(DOMAIN_IO):
		return "IO     ";
	case PMD_DOMAIN(DOMAIN_VECTORS):
		return "VECTORS";
	}
#endif
	return NULL;
}
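
/*
 * Walk the PMD level. On non-LPAE ARM a PMD covers 2MiB but hardware
 * sections are 1MiB, so the second section of a section-mapped PMD is
 * noted separately.
 */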
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long addr;
	unsigned i;
	const char *domain;

	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		addr = start + i * PMD_SIZE;
		domain = get_domain_name(pmd);
		if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
			note_page(st, addr, 4, pmd_val(*pmd), domain);
		else
			walk_pte(st, pmd, addr, domain);

		if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) {
			addr += SECTION_SIZE;
			pmd++;
			domain = get_domain_name(pmd);
			note_page(st, addr, 4, pmd_val(*pmd), domain);
		}
	}
}
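
/*
 * The PUD and P4D levels are folded on ARM; these walkers simply
 * descend into populated entries or note the empty slot.
 */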
static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
{
	pud_t *pud = pud_offset(p4d, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		addr = start + i * PUD_SIZE;
		if (!pud_none(*pud)) {
			walk_pmd(st, pud, addr);
		} else {
			note_page(st, addr, 3, pud_val(*pud), NULL);
		}
	}
}
static void walk_p4d(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
	p4d_t *p4d = p4d_offset(pgd, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		addr = start + i * P4D_SIZE;
		if (!p4d_none(*p4d)) {
			walk_pud(st, p4d, addr);
		} else {
			note_page(st, addr, 2, p4d_val(*p4d), NULL);
		}
	}
}
static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
		     unsigned long start)
{
	pgd_t *pgd = pgd_offset(mm, 0UL);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
		addr = start + i * PGDIR_SIZE;
		if (!pgd_none(*pgd)) {
			walk_p4d(st, pgd, addr);
		} else {
			note_page(st, addr, 1, pgd_val(*pgd), NULL);
		}
	}
}
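
/*
 * Entry point for the debugfs dump: this is what userspace reads as
 * kernel_page_tables (typically under /sys/kernel/debug). The final
 * note_page() call flushes the last accumulated range.
 */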
void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
{
	struct pg_state st = {
		.seq = m,
		.marker = info->markers,
		.check_wx = false,
	};

	walk_pgd(&st, info->mm, info->base_addr);
	note_page(&st, 0, 0, 0, NULL);
}
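
/*
 * Precompute the per-level decode mask, cache which table entries carry
 * the read-only and non-execute bits, and fix up the runtime-only
 * VMALLOC_START marker.
 */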
static void ptdump_initialize(void)
{
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		if (pg_level[i].bits)
			for (j = 0; j < pg_level[i].num; j++) {
				pg_level[i].mask |= pg_level[i].bits[j].mask;
				if (pg_level[i].bits[j].ro_bit)
					pg_level[i].ro_bit = &pg_level[i].bits[j];
				if (pg_level[i].bits[j].nx_bit)
					pg_level[i].nx_bit = &pg_level[i].bits[j];
			}

	address_markers[2].start_address = VMALLOC_START;
}
static struct ptdump_info kernel_ptdump_info = {
	.mm = &init_mm,
	.markers = address_markers,
	.base_addr = 0,
};
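
/*
 * Scan the kernel page tables for W+X mappings with output suppressed
 * (NULL seq_file); only the page count and pass/fail verdict are
 * reported.
 */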
void ptdump_check_wx(void)
{
	struct pg_state st = {
		.seq = NULL,
		.marker = (struct addr_marker[]) {
			{ 0, NULL},
			{ -1, NULL},
		},
		.check_wx = true,
	};

	walk_pgd(&st, &init_mm, 0);
	note_page(&st, 0, 0, 0, NULL);
	if (st.wx_pages)
		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
			st.wx_pages);
	else
		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}
static int ptdump_init(void)
{
	ptdump_initialize();
	ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");
	return 0;
}
__initcall(ptdump_init);