// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>

#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm-generic/sections.h>

static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

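/*
 * The two init phases see "none" entries differently: in the early phase an
 * unpopulated entry is simply zero, while after the early shadow has been
 * installed an entry is "none" when it still points at the shared
 * kasan_early_shadow_* table of the next level, i.e. the late populate pass
 * has not yet given it a private table. Folded levels can never be "none".
 */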
#ifdef __PAGETABLE_P4D_FOLDED
#define __pgd_none(early, pgd) (0)
#else
#define __pgd_none(early, pgd) (early ? (pgd_val(pgd) == 0) : \
(__pa(pgd_val(pgd)) == (unsigned long)__pa(kasan_early_shadow_p4d)))
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define __p4d_none(early, p4d) (0)
#else
#define __p4d_none(early, p4d) (early ? (p4d_val(p4d) == 0) : \
(__pa(p4d_val(p4d)) == (unsigned long)__pa(kasan_early_shadow_pud)))
#endif

#ifdef __PAGETABLE_PMD_FOLDED
#define __pud_none(early, pud) (0)
#else
#define __pud_none(early, pud) (early ? (pud_val(pud) == 0) : \
(__pa(pud_val(pud)) == (unsigned long)__pa(kasan_early_shadow_pmd)))
#endif

#define __pmd_none(early, pmd) (early ? (pmd_val(pmd) == 0) : \
(__pa(pmd_val(pmd)) == (unsigned long)__pa(kasan_early_shadow_pte)))

#define __pte_none(early, pte) (early ? pte_none(pte) : \
((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))

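/* Set to false in kasan_init(); consulted via kasan_arch_is_ready(). */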
bool kasan_early_stage = true;

void *kasan_mem_to_shadow(const void *addr)
{
        if (!kasan_arch_is_ready()) {
                return (void *)(kasan_early_shadow_page);
        } else {
                unsigned long maddr = (unsigned long)addr;
                unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
                unsigned long offset = 0;

                if (maddr >= FIXADDR_START)
                        return (void *)(kasan_early_shadow_page);

                maddr &= XRANGE_SHADOW_MASK;
                switch (xrange) {
                case XKPRANGE_CC_SEG:
                        offset = XKPRANGE_CC_SHADOW_OFFSET;
                        break;
                case XKPRANGE_UC_SEG:
                        offset = XKPRANGE_UC_SHADOW_OFFSET;
                        break;
                case XKPRANGE_WC_SEG:
                        offset = XKPRANGE_WC_SHADOW_OFFSET;
                        break;
                case XKVRANGE_VC_SEG:
                        offset = XKVRANGE_VC_SHADOW_OFFSET;
                        break;
                default:
                        WARN_ON(1);
                        return NULL;
                }

                return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
        }
}
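
/*
 * Worked example (illustrative): with the generic KASAN scale of 8 bytes of
 * memory per shadow byte (KASAN_SHADOW_SCALE_SHIFT == 3), an address addr
 * for which ((addr >> XRANGE_SHIFT) & 0xffff) == XKPRANGE_CC_SEG takes the
 * cached-window branch and maps to
 * ((addr & XRANGE_SHADOW_MASK) >> 3) + XKPRANGE_CC_SHADOW_OFFSET,
 * so every segment gets its own linear slice of the shadow region.
 */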

const void *kasan_shadow_to_mem(const void *shadow_addr)
{
        unsigned long addr = (unsigned long)shadow_addr;

        if (unlikely(addr > KASAN_SHADOW_END) ||
                unlikely(addr < KASAN_SHADOW_START)) {
                WARN_ON(1);
                return NULL;
        }

        if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
                return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
        else if (addr >= XKPRANGE_WC_SHADOW_OFFSET)
                return (void *)(((addr - XKPRANGE_WC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_WC_START);
        else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
                return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
        else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
                return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
        else {
                WARN_ON(1);
                return NULL;
        }
}
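
/*
 * The descending if-chain above assumes the per-segment shadow offsets are
 * laid out in ascending order (CC < UC < WC < VC); the first match picks
 * the segment, then the kasan_mem_to_shadow() mapping is inverted.
 */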

/*
 * Allocate a zeroed page for the shadow memory page tables.
 */
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
        void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
                                        __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
        if (!p)
                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
                        __func__, PAGE_SIZE, PAGE_SIZE, node, __pa(MAX_DMA_ADDRESS));

        return __pa(p);
}

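/*
 * The kasan_{pte,pmd,pud,p4d}_offset() helpers below share one pattern:
 * if the current entry is still "none" (see __*_none() above), install a
 * next-level table first. In the early phase that is the shared
 * kasan_early_shadow_* table; in the late phase a fresh page is allocated
 * and seeded with a copy of the early table, so slots not explicitly
 * remapped keep pointing at the zero shadow.
 */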
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
{
        if (__pmd_none(early, pmdp_get(pmdp))) {
                phys_addr_t pte_phys = early ?
                                __pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
                if (!early)
                        memcpy(__va(pte_phys), kasan_early_shadow_pte, sizeof(kasan_early_shadow_pte));
                pmd_populate_kernel(NULL, pmdp, (pte_t *)__va(pte_phys));
        }

        return pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
{
        if (__pud_none(early, pudp_get(pudp))) {
                phys_addr_t pmd_phys = early ?
                                __pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);
                if (!early)
                        memcpy(__va(pmd_phys), kasan_early_shadow_pmd, sizeof(kasan_early_shadow_pmd));
                pud_populate(&init_mm, pudp, (pmd_t *)__va(pmd_phys));
        }

        return pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
{
        if (__p4d_none(early, p4dp_get(p4dp))) {
                phys_addr_t pud_phys = early ?
                        __pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);
                if (!early)
                        memcpy(__va(pud_phys), kasan_early_shadow_pud, sizeof(kasan_early_shadow_pud));
                p4d_populate(&init_mm, p4dp, (pud_t *)__va(pud_phys));
        }

        return pud_offset(p4dp, addr);
}

static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node, bool early)
{
        if (__pgd_none(early, pgdp_get(pgdp))) {
                phys_addr_t p4d_phys = early ?
                        __pa_symbol(kasan_early_shadow_p4d) : kasan_alloc_zeroed_page(node);
                if (!early)
                        memcpy(__va(p4d_phys), kasan_early_shadow_p4d, sizeof(kasan_early_shadow_p4d));
                pgd_populate(&init_mm, pgdp, (p4d_t *)__va(p4d_phys));
        }

        return p4d_offset(pgdp, addr);
}

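/*
 * Per-level populate walkers. Each level steps with *_addr_end() (or
 * PAGE_SIZE at the PTE level), and all but the top-level loop also stop
 * early, via __*_none(), once they reach an entry that is already
 * populated.
 */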
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

        do {
                phys_addr_t page_phys = early ?
                                        __pa_symbol(kasan_early_shadow_page)
                                              : kasan_alloc_zeroed_page(node);
                next = addr + PAGE_SIZE;
                set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
        } while (ptep++, addr = next, addr != end && __pte_none(early, ptep_get(ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

        do {
                next = pmd_addr_end(addr, end);
                kasan_pte_populate(pmdp, addr, next, node, early);
        } while (pmdp++, addr = next, addr != end && __pmd_none(early, pmdp_get(pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
                                            unsigned long end, int node, bool early)
{
        unsigned long next;
        pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

        do {
                next = pud_addr_end(addr, end);
                kasan_pmd_populate(pudp, addr, next, node, early);
        } while (pudp++, addr = next, addr != end && __pud_none(early, pudp_get(pudp)));
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
                                            unsigned long end, int node, bool early)
{
        unsigned long next;
        p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);

        do {
                next = p4d_addr_end(addr, end);
                kasan_pud_populate(p4dp, addr, next, node, early);
        } while (p4dp++, addr = next, addr != end && __p4d_none(early, p4dp_get(p4dp)));
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
                                      int node, bool early)
{
        unsigned long next;
        pgd_t *pgdp;

        pgdp = pgd_offset_k(addr);

        do {
                next = pgd_addr_end(addr, end);
                kasan_p4d_populate(pgdp, addr, next, node, early);
        } while (pgdp++, addr = next, addr != end);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
                                      int node)
{
        kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

asmlinkage void __init kasan_early_init(void)
{
        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END + 1, PGDIR_SIZE));
}
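
/*
 * The PGDIR_SIZE alignment asserted above is what allows clear_pgds() and
 * the early kasan_pgd_populate() call to operate on whole pgd entries
 * without touching mappings outside the shadow region.
 */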

static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
{
        WRITE_ONCE(*pgdp, pgdval);
}

static void __init clear_pgds(unsigned long start, unsigned long end)
{
        /*
         * Remove references to the kasan page tables from
         * swapper_pg_dir. pgd_clear() can't be used here because
         * it is a no-op on 2- and 3-level page table setups.
         */
        for (; start < end; start = pgd_addr_end(start, end))
                kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
}

void __init kasan_init(void)
{
        u64 i;
        phys_addr_t pa_start, pa_end;

        /*
         * If PGDIR_SIZE is too large for cpu_vabits, KASAN_SHADOW_END will
         * overflow UINTPTR_MAX and then look like a user-space address.
         * For example, the PGDIR_SIZE of CONFIG_4KB_4LEVEL is 2^39, which
         * is too large for the Loongson-2K series, whose cpu_vabits = 39.
         */
        if (KASAN_SHADOW_END < vm_map_base) {
                pr_warn("PGDIR_SIZE too large for cpu_vabits, KernelAddressSanitizer disabled.\n");
                return;
        }

        /*
         * The pgd entries covering the KASAN shadow region were populated
         * with invalid_pmd_table or invalid_pud_table in pagetable_init()
         * (which one depends on how many page table levels are in use), so
         * their values are non-zero: pgd_none() would be false and the
         * populate pass below would never create new pgd entries. Clear
         * them first. While swapper_pg_dir is being rewritten, run
         * temporarily on kasan_pg_dir, a copy that keeps the current
         * mappings alive.
         */
        memcpy(kasan_pg_dir, swapper_pg_dir, sizeof(kasan_pg_dir));
        csr_write64(__pa_symbol(kasan_pg_dir), LOONGARCH_CSR_PGDH);
        local_flush_tlb_all();

        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

        /* Maps everything to a single page of zeroes */
        kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);

        kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
                                        kasan_mem_to_shadow((void *)KFENCE_AREA_END));

        kasan_early_stage = false;

        /* Populate the linear mapping */
        for_each_mem_range(i, &pa_start, &pa_end) {
                void *start = (void *)phys_to_virt(pa_start);
                void *end   = (void *)phys_to_virt(pa_end);

                if (start >= end)
                        break;

                kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
                        (unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE);
        }

        /* Populate modules mapping */
        kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
                (unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
        /*
         * KASAN may reuse the contents of kasan_early_shadow_pte directly,
         * so we should make sure that it maps the zero page read-only.
         */
        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_early_shadow_pte[i],
                        pfn_pte(__phys_to_pfn(__pa_symbol(kasan_early_shadow_page)), PAGE_KERNEL_RO));

        memset(kasan_early_shadow_page, 0, PAGE_SIZE);
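
        /* Switch back to swapper_pg_dir now that it is fully populated. */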
        csr_write64(__pa_symbol(swapper_pg_dir), LOONGARCH_CSR_PGDH);
        local_flush_tlb_all();

        /* At this point kasan is fully initialized. Enable error messages */
        init_task.kasan_depth = 0;
        pr_info("KernelAddressSanitizer initialized.\n");
}