// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains some KASAN initialization code.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgalloc.h>

#include "kasan.h"

/*
 * This page serves two purposes:
 *   - It is used as early shadow memory: the entire shadow region is
 *     populated with this page before we are able to set up the normal
 *     shadow memory.
 *   - Later it is reused as the zero shadow, covering large ranges of
 *     memory that are allowed to be accessed but are not handled by
 *     KASAN (vmalloc/vmemmap ...).
 */
unsigned char kasan_early_shadow_page[PAGE_SIZE] __page_aligned_bss;

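/*
 * For orientation, a minimal sketch of the shadow address arithmetic this
 * page backs (mirroring kasan_mem_to_shadow() from include/linux/kasan.h):
 * one shadow byte describes KASAN_SHADOW_SCALE_SIZE
 * (1 << KASAN_SHADOW_SCALE_SHIFT) bytes of memory:
 *
 *	shadow = (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
 *			+ KASAN_SHADOW_OFFSET;
 *
 * Mapping whole stretches of that shadow range to this single read-only
 * page marks the covered memory as accessible without allocating real
 * shadow.
 */
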
#if CONFIG_PGTABLE_LEVELS > 4
p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
}
#else
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_early_shadow_pud[PTRS_PER_PUD] __page_aligned_bss;
static inline bool kasan_pud_table(p4d_t p4d)
{
	return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
}
#else
static inline bool kasan_pud_table(p4d_t p4d)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD] __page_aligned_bss;
static inline bool kasan_pmd_table(pud_t pud)
{
	return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
}
#else
static inline bool kasan_pmd_table(pud_t pud)
{
	return false;
}
#endif

pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;

static inline bool kasan_pte_table(pmd_t pmd)
{
	return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
}

static inline bool kasan_early_shadow_page_entry(pte_t pte)
{
	return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page));
}

static __init void *early_alloc(size_t size, int node)
{
	return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

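/*
 * A note on the __ref populate helpers below (a summary of existing
 * behaviour, not new code): shadow population can run both at early boot
 * and after the slab allocator is up (e.g. from kasan_add_zero_shadow()),
 * so each level picks its allocator at run time, roughly:
 *
 *	if (slab_is_available())
 *		p = pte_alloc_one_kernel(&init_mm);
 *	else
 *		p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
 *
 * early_alloc() is __init; marking the callers __ref documents that this
 * reference to init code is intentional and only exercised before boot
 * completes.
 */
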
static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	pte_t zero_pte;

	/* All pages in [addr, end) map the shared, write-protected zero shadow. */
	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_early_shadow_page)),
			   PAGE_KERNEL);
	zero_pte = pte_wrprotect(zero_pte);

	while (addr + PAGE_SIZE <= end) {
		set_pte_at(&init_mm, addr, pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
}

static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
			/* Whole pmd: point it at the shared early pte table. */
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pmd_none(*pmd)) {
			pte_t *p;

			if (slab_is_available())
				p = pte_alloc_one_kernel(&init_mm);
			else
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
			if (!p)
				return -ENOMEM;

			pmd_populate_kernel(&init_mm, pmd, p);
		}
		zero_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

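/*
 * A worked example of the PMD_SIZE shortcut above, assuming 4K pages and
 * the generic 1:8 shadow scaling: PMD_SIZE is 2M, so pointing a single pmd
 * entry at the shared kasan_early_shadow_pte maps 2M of shadow, which in
 * turn covers 2M << KASAN_SHADOW_SCALE_SHIFT = 16M of memory, without
 * allocating a pte page or any shadow pages.
 */
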
static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
			pmd_t *pmd;

			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pud_none(*pud)) {
			pmd_t *p;

			if (slab_is_available()) {
				p = pmd_alloc(&init_mm, pud, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pud_populate(&init_mm, pud,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end);

	return 0;
}

static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
				unsigned long end)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	do {
		next = p4d_addr_end(addr, end);
		if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
			pud_t *pud;
			pmd_t *pmd;

			p4d_populate(&init_mm, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (p4d_none(*p4d)) {
			pud_t *p;

			if (slab_is_available()) {
				p = pud_alloc(&init_mm, p4d, addr);
				if (!p)
					return -ENOMEM;
			} else {
				p4d_populate(&init_mm, p4d,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_pud_populate(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);

	return 0;
}

/**
 * kasan_populate_early_shadow - populate shadow memory region with
 *                               kasan_early_shadow_page
 * @shadow_start: start of the memory range to populate
 * @shadow_end: end of the memory range to populate
 */
int __ref kasan_populate_early_shadow(const void *shadow_start,
					const void *shadow_end)
{
	unsigned long addr = (unsigned long)shadow_start;
	unsigned long end = (unsigned long)shadow_end;
	pgd_t *pgd = pgd_offset_k(addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			/*
			 * kasan_early_shadow_pud should already be populated
			 * with pmds at this point.
			 *
			 * The [pud,pmd]_populate*() calls below are needed
			 * only for 3- and 2-level page tables, where we don't
			 * have puds/pmds, so pgd_populate() and pud_populate()
			 * are no-ops there.
			 *
			 * The ifndef is required to avoid build breakage:
			 * with 5level-fixup.h, pgd_populate() is not a no-op
			 * and references kasan_early_shadow_p4d, which is not
			 * defined unless 5-level paging is enabled.
			 *
			 * The ifndef can be dropped once all KASAN-enabled
			 * architectures have switched to pgtable-nop4d.h.
			 */
#ifndef __ARCH_HAS_5LEVEL_HACK
			pgd_populate(&init_mm, pgd,
					lm_alias(kasan_early_shadow_p4d));
#endif
			p4d = p4d_offset(pgd, addr);
			p4d_populate(&init_mm, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pgd_none(*pgd)) {
			p4d_t *p;

			if (slab_is_available()) {
				p = p4d_alloc(&init_mm, pgd, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pgd_populate(&init_mm, pgd,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);

	return 0;
}

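/*
 * Typical usage (illustrative; the actual ranges are architecture-specific):
 * an architecture's kasan_init() backs regions that never get real shadow,
 * such as the vmalloc area, with the zero shadow, e.g.:
 *
 *	kasan_populate_early_shadow(
 *			kasan_mem_to_shadow((void *)VMALLOC_START),
 *			kasan_mem_to_shadow((void *)VMALLOC_END));
 */
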
static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	/* Free the pte page only if every entry in it is already clear. */
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd)));
	pmd_clear(pmd);
}

static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, (pmd_t *)page_to_virt(pud_page(*pud)));
	pud_clear(pud);
}

static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, (pud_t *)page_to_virt(p4d_page(*p4d)));
	p4d_clear(p4d);
}

static void kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
{
	p4d_t *p4d;
	int i;

	for (i = 0; i < PTRS_PER_P4D; i++) {
		p4d = p4d_start + i;
		if (!p4d_none(*p4d))
			return;
	}

	p4d_free(&init_mm, (p4d_t *)page_to_virt(pgd_page(*pgd)));
	pgd_clear(pgd);
}

static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/* Only the shared zero shadow page is expected here. */
		if (WARN_ON(!kasan_early_shadow_page_entry(*pte)))
			continue;
		pte_clear(&init_mm, addr, pte);
	}
}

static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pmd++) {
		pte_t *pte;

		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (kasan_pte_table(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				pmd_clear(pmd);
				continue;
			}
		}
		pte = pte_offset_kernel(pmd, addr);
		kasan_remove_pte_table(pte, addr, next);
		kasan_free_pte(pte_offset_kernel(pmd, 0), pmd);
	}
}

static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pud++) {
		pmd_t *pmd, *pmd_base;

		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (kasan_pmd_table(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				pud_clear(pud);
				continue;
			}
		}
		pmd = pmd_offset(pud, addr);
		pmd_base = pmd_offset(pud, 0);
		kasan_remove_pmd_table(pmd, addr, next);
		kasan_free_pmd(pmd_base, pud);
	}
}

static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, p4d++) {
		pud_t *pud;

		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		if (kasan_pud_table(*p4d)) {
			if (IS_ALIGNED(addr, P4D_SIZE) &&
			    IS_ALIGNED(next, P4D_SIZE)) {
				p4d_clear(p4d);
				continue;
			}
		}
		pud = pud_offset(p4d, addr);
		kasan_remove_pud_table(pud, addr, next);
		kasan_free_pud(pud_offset(p4d, 0), p4d);
	}
}

void kasan_remove_zero_shadow(void *start, unsigned long size)
{
	unsigned long addr, end, next;
	pgd_t *pgd;

	addr = (unsigned long)kasan_mem_to_shadow(start);
	end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start %
			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
		return;

	for (; addr < end; addr = next) {
		p4d_t *p4d;

		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (kasan_p4d_table(*pgd)) {
			if (IS_ALIGNED(addr, PGDIR_SIZE) &&
			    IS_ALIGNED(next, PGDIR_SIZE)) {
				pgd_clear(pgd);
				continue;
			}
		}

		p4d = p4d_offset(pgd, addr);
		kasan_remove_p4d_table(p4d, addr, next);
		kasan_free_p4d(p4d_offset(pgd, 0), pgd);
	}
}

int kasan_add_zero_shadow(void *start, unsigned long size)
{
	int ret;
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(start);
	shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start %
			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
		return -EINVAL;

	ret = kasan_populate_early_shadow(shadow_start, shadow_end);
	if (ret)
		/*
		 * kasan_remove_zero_shadow() takes the unshadowed address
		 * and size; it translates them to shadow itself.
		 */
		kasan_remove_zero_shadow(start, size);
	return ret;
}
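
/*
 * Usage sketch (illustrative): kasan_add_zero_shadow() and
 * kasan_remove_zero_shadow() are the run-time counterparts of the boot
 * path above, used when a range such as device memory appears after boot:
 *
 *	if (kasan_add_zero_shadow(addr, size))
 *		return -ENOMEM;
 *	...
 *	kasan_remove_zero_shadow(addr, size);
 *
 * Both take the unshadowed address and size, and both WARN and bail out
 * unless start and size are aligned to KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE.
 */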