/*
 * This file contains common generic and tag-based KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
	       (ptr >= (unsigned long)&__softirqentry_text_start &&
		ptr < (unsigned long)&__softirqentry_text_end);
}

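/*
 * Everything below the irq entry point in a trace belongs to whatever
 * context happened to be interrupted; keeping those frames would make
 * otherwise-identical interrupt traces unique and bloat the stack depot.
 * filter_irq_stacks() therefore cuts the trace right after the irq entry
 * function.
 */
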
static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

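/*
 * set_track() records who touched the object: the current pid plus a
 * stack depot handle. The depot stores each unique trace only once, so
 * the handle is cheap enough to keep in the per-object metadata.
 */
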
static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

void kasan_check_read(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

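/*
 * The compiler doesn't instrument accesses performed inside the mem*()
 * functions themselves, so KASAN provides checking wrappers for them:
 * each one validates the affected region(s) and then delegates to the
 * architecture's __mem*() implementation.
 */
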
#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}

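/*
 * Unpoisoning marks the first 'size' bytes of an object as addressable.
 * Worked example, assuming generic KASAN with KASAN_SHADOW_SCALE_SIZE == 8:
 * for size == 13, kasan_poison_shadow(address, 13, 0) zeroes one full
 * shadow byte (the first 8 bytes are fully addressable), and the partial
 * write below stores 13 & KASAN_SHADOW_MASK == 5 into the next shadow
 * byte, meaning only the first 5 bytes of that granule may be touched.
 */
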
void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current',
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

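/*
 * For example, optimal_redzone(96) == 32 (96 fits the 128 - 32 bucket),
 * while optimal_redzone(1024) == 128 (the first bucket it fits is
 * 4096 - 128).
 */
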
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);

	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

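/*
 * The resulting layout is: object, then alloc metadata, then (only for
 * caches that cannot reuse the object memory: SLAB_TYPESAFE_BY_RCU, a
 * constructor, or an object smaller than the free metadata) the free
 * metadata, then the redzone. For all other caches free_meta_offset
 * stays zero and the free metadata lives inside the freed object itself.
 */
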
size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

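/*
 * A freshly allocated slab page is poisoned wholesale as redzone;
 * individual objects get unpoisoned as they are handed out, so the gaps
 * between objects and any never-allocated objects stay inaccessible.
 */
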
void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));

	return (void *)object;
}

void *kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	return kasan_kmalloc(cache, object, cache->object_size, flags);
}

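/*
 * On free, the object is poisoned and, rather than being handed straight
 * back to the allocator, is parked in the quarantine together with its
 * free stack. Delaying reuse widens the window during which a
 * use-after-free still hits KASAN_KMALLOC_FREE shadow and gets reported.
 */
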
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	unsigned long rounded_up_size;

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}

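/*
 * kasan_kmalloc() unpoisons exactly the requested size and turns the
 * rest of the object into a redzone. Example for kmalloc(10) served from
 * the kmalloc-32 cache, assuming 8-byte shadow granules: the shadow
 * becomes 0 then 2 for bytes 0-15 (8 valid bytes plus 2 valid bytes) and
 * KASAN_KMALLOC_REDZONE for bytes 16-31, so any access past byte 9 is
 * caught.
 */
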
void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);

	return (void *)object;
}
EXPORT_SYMBOL(kasan_kmalloc);

void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);

	return (void *)ptr;
}

void *kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
	else
		return kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				    KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}

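/*
 * Module shadow is allocated on demand at a 1:KASAN_SHADOW_SCALE_SIZE
 * ratio. For example, with 8-byte granules a 4096-byte module mapping
 * needs (4096 + 7) >> 3 == 512 shadow bytes, rounded up to a full page
 * below.
 */
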
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(): the first one is
	 * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
	 * pud_bad(): if the pud is bad, then it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

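/*
 * Memory hotplug: a block that goes online needs its shadow mapped
 * before the first access, so MEM_GOING_ONLINE vmalloc()s shadow for the
 * new range; on MEM_OFFLINE/MEM_CANCEL_ONLINE the shadow is freed again,
 * unless it was part of the boot-time mapping.
 */
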
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, then it must have been
		 * mapped during boot. This can happen if we are onlining
		 * previously offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow.
		 * A non-NULL result from find_vm_area() tells us that we are
		 * in the second case.
		 *
		 * Currently it's not possible to free shadow mapped during
		 * boot by kasan_init(), because the code to do that hasn't
		 * been written yet. So we'll just leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif