// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov.
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so the compiler can perform better
 * optimizations in each of __asan_loadX/__asan_storeX depending on the
 * memory access size X.
 */
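
/*
 * Illustrative note (editor's addition, not from the original source):
 * generic KASAN maps each 8-byte granule of memory to one shadow byte.
 * A shadow value of 0 means the whole granule is accessible, a value of
 * 1..7 means only the first N bytes are accessible, and a negative value
 * marks the granule as fully poisoned (redzone, freed object, etc.).
 * The helpers below decode this encoding for accesses of a fixed size.
 */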

static __always_inline bool memory_is_poisoned_1(const void *addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow(addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = (unsigned long)addr & KASAN_GRANULE_MASK;

		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_2_4_8(const void *addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow(addr);

	/*
	 * Access crosses 8(shadow size)-byte boundary. Such access maps
	 * into 2 shadow bytes, so we need to check them both.
	 */
	if (unlikely((((unsigned long)addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}
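
/*
 * Example (illustrative): an 8-byte load that starts at offset 5 within its
 * granule spills into the next granule, so both shadow bytes must be clean;
 * an access that stays within one granule only needs the single check done
 * via memory_is_poisoned_1() on its last byte.
 */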

static __always_inline bool memory_is_poisoned_16(const void *addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow(addr);

	/* Unaligned 16-bytes access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED((unsigned long)addr, KASAN_GRANULE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}
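
/*
 * Example (illustrative): an aligned 16-byte access covers exactly two
 * granules, so a single u16 shadow load suffices; an unaligned one spills
 * into a third granule, which is why the last byte gets the extra
 * memory_is_poisoned_1() check above.
 */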

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}

static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow(addr),
			kasan_mem_to_shadow(addr + size - 1) + 1);

	if (unlikely(ret)) {
		const void *last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte);
		s8 last_accessible_byte = (unsigned long)last_byte & KASAN_GRANULE_MASK;

		if (unlikely(ret != (unsigned long)last_shadow ||
			     last_accessible_byte >= *last_shadow))
			return true;
	}

	return false;
}
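
/*
 * Rough sketch of the logic above: the shadow for the whole range is scanned
 * for any non-zero byte; a hit is only a real violation if it is not the
 * shadow byte of the last granule, or if it is and the access reaches past
 * the number of accessible bytes recorded there.
 */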

static __always_inline bool memory_is_poisoned(const void *addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

static __always_inline bool check_region_inline(const void *addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (!kasan_arch_is_ready())
		return true;

	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	if (unlikely(!addr_has_metadata(addr)))
		return !kasan_report(addr, size, write, ret_ip);

	if (likely(!memory_is_poisoned(addr, size)))
		return true;

	return !kasan_report(addr, size, write, ret_ip);
}

bool kasan_check_range(const void *addr, size_t size, bool write,
					unsigned long ret_ip)
{
	return check_region_inline(addr, size, write, ret_ip);
}

bool kasan_byte_accessible(const void *addr)
{
	s8 shadow_byte;

	if (!kasan_arch_is_ready())
		return true;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

	return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
	kasan_quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		kasan_quarantine_remove_cache(cache);
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);

	kasan_unpoison(global->beg, global->size, false);

	kasan_poison(global->beg + aligned_size,
		     global->size_with_redzone - aligned_size,
		     KASAN_GLOBAL_REDZONE, false);
}

void __asan_register_globals(void *ptr, ssize_t size)
{
	int i;
	struct kasan_global *globals = ptr;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(void *ptr, ssize_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(void *addr)				\
	{								\
		check_region_inline(addr, size, false, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(void *);			\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(void *addr)				\
	{								\
		check_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(void *);			\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
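
/*
 * Expansion sketch (illustrative, editor's addition): DEFINE_ASAN_LOAD_STORE(1)
 * above defines __asan_load1()/__asan_store1(), which the compiler emits a
 * call to before every instrumented 1-byte load/store, plus the *_noabort
 * aliases that kernel instrumentation actually calls, since a KASAN report
 * does not abort the kernel.
 */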

void __asan_loadN(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(void *addr, ssize_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED((unsigned long)addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison((const void *)(addr + rounded_down_size),
			size - rounded_down_size, false);
	kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_LEFT, false);
	kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_RIGHT, false);
}
EXPORT_SYMBOL(__asan_alloca_poison);
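
/*
 * Layout sketch (illustrative, editor's addition): after the poisoning above,
 * an alloca()ed object on the stack looks roughly like
 *   [ left redzone | object | partial-granule padding | right redzone ]
 * with the object itself unpoisoned and the redzones marked with
 * KASAN_ALLOCA_LEFT/KASAN_ALLOCA_RIGHT.
 */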

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom)
{
	if (unlikely(!stack_top || stack_top > (void *)stack_bottom))
		return;

	kasan_unpoison(stack_top, (void *)stack_bottom - stack_top, false);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte)					\
	void __asan_set_shadow_##byte(const void *addr, ssize_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);
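
/*
 * Note (illustrative, editor's addition): the 0xf1/0xf2/... values above are
 * the compiler-defined stack redzone markers placed around instrumented local
 * variables; each __asan_set_shadow_XX() call simply memsets the shadow bytes
 * for the given range to that marker (or to 00 to unpoison).
 */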

/* Only allow cache merging when no per-object metadata is present. */
slab_flags_t kasan_never_merge(void)
{
	if (!kasan_requires_meta())
		return 0;
	return SLAB_KASAN;
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
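
/*
 * Example (illustrative, editor's addition): a 100-byte object does not fit
 * the first two brackets (48 and 96 bytes) but fits the 448-byte one, so it
 * gets a 64-byte redzone; very large objects are capped at a 2048-byte
 * redzone.
 */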

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;
	unsigned int rem_free_meta_size;
	unsigned int orig_alloc_meta_offset;

	if (!kasan_requires_meta())
		return;

	/*
	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN
	 * and that thus have per-object metadata.
	 * Currently this flag is used in two places:
	 * 1. In slab_ksize() to account for per-object metadata when
	 *    calculating the size of the accessible memory within the object.
	 * 2. In slab_common.c via kasan_never_merge() to prevent merging of
	 *    caches with per-object metadata.
	 */
	*flags |= SLAB_KASAN;

	ok_size = *size;

	/* Add alloc meta into the redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* If alloc meta doesn't fit, don't add it. */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	ok_size = *size;
	orig_alloc_meta_offset = cache->kasan_info.alloc_meta_offset;

	/*
	 * Store free meta in the redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
		goto free_meta_added;
	}

	/*
	 * Otherwise, if the object is large enough to contain free meta,
	 * store it within the object.
	 */
	if (sizeof(struct kasan_free_meta) <= cache->object_size) {
		/* cache->kasan_info.free_meta_offset = 0 is implied. */
		goto free_meta_added;
	}

	/*
	 * For smaller objects, store the beginning of free meta within the
	 * object and the end in the redzone. And thus shift the location of
	 * alloc meta to free up space for free meta.
	 * This is only possible when slub_debug is disabled, as otherwise
	 * the end of free meta will overlap with slub_debug metadata.
	 */
	if (!__slub_debug_enabled()) {
		rem_free_meta_size = sizeof(struct kasan_free_meta) -
							cache->object_size;
		*size += rem_free_meta_size;
		if (cache->kasan_info.alloc_meta_offset != 0)
			cache->kasan_info.alloc_meta_offset += rem_free_meta_size;
		goto free_meta_added;
	}

	/*
	 * If the object is small and slub_debug is enabled, store free meta
	 * in the redzone after alloc meta.
	 */
	cache->kasan_info.free_meta_offset = *size;
	*size += sizeof(struct kasan_free_meta);

free_meta_added:
	/* If free meta doesn't fit, don't add it. */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		cache->kasan_info.alloc_meta_offset = orig_alloc_meta_offset;
		*size = ok_size;
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE. */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}
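
/*
 * Resulting object layout (rough sketch, editor's addition; exact offsets
 * depend on which branch above was taken):
 *   [ object | alloc meta (optional) | free meta (optional) | redzone ]
 * with free meta possibly stored inside the object instead of the redzone.
 */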

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta) {
		/* Zero out alloc meta to mark it as invalid. */
		__memset(alloc_meta, 0, sizeof(*alloc_meta));

		/*
		 * Prepare the lock for saving auxiliary stack traces.
		 * Temporarily disable KASAN bug reporting to allow instrumented
		 * raw_spin_lock_init to access aux_lock, which resides inside
		 * of a redzone.
		 */
		kasan_disable_current();
		raw_spin_lock_init(&alloc_meta->aux_lock);
		kasan_enable_current();
	}

	/*
	 * Explicitly marking free meta as invalid is not required: the shadow
	 * value for the first 8 bytes of a newly allocated object is not
	 * KASAN_SLAB_FREE_META.
	 */
}

static void release_alloc_meta(struct kasan_alloc_meta *meta)
{
	/* Evict the stack traces from stack depot. */
	stack_depot_put(meta->alloc_track.stack);
	stack_depot_put(meta->aux_stack[0]);
	stack_depot_put(meta->aux_stack[1]);

	/*
	 * Zero out alloc meta to mark it as invalid but keep aux_lock
	 * initialized to avoid having to reinitialize it when another object
	 * is allocated in the same slot.
	 */
	__memset(&meta->alloc_track, 0, sizeof(meta->alloc_track));
	__memset(meta->aux_stack, 0, sizeof(meta->aux_stack));
}

static void release_free_meta(const void *object, struct kasan_free_meta *meta)
{
	/* Check if free meta is valid. */
	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
		return;

	/* Evict the stack trace from the stack depot. */
	stack_depot_put(meta->free_track.stack);

	/* Mark free meta as invalid. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
}

void kasan_release_object_meta(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_meta;
	struct kasan_free_meta *free_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		release_alloc_meta(alloc_meta);

	free_meta = kasan_get_free_meta(cache, object);
	if (free_meta)
		release_free_meta(object, free_meta);
}

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
{
	struct kasan_cache *info = &cache->kasan_info;

	if (!kasan_requires_meta())
		return 0;

	if (in_object)
		return (info->free_meta_offset ?
			0 : sizeof(struct kasan_free_meta));
	else
		return (info->alloc_meta_offset ?
			sizeof(struct kasan_alloc_meta) : 0) +
			((info->free_meta_offset &&
			  info->free_meta_offset != KASAN_NO_FREE_META) ?
			 sizeof(struct kasan_free_meta) : 0);
}

static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
{
	struct slab *slab = kasan_addr_to_slab(addr);
	struct kmem_cache *cache;
	struct kasan_alloc_meta *alloc_meta;
	void *object;
	depot_stack_handle_t new_handle, old_handle;
	unsigned long flags;

	if (is_kfence_address(addr) || !slab)
		return;

	cache = slab->slab_cache;
	object = nearest_obj(cache, slab, addr);
	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	new_handle = kasan_save_stack(0, depot_flags);

	/*
	 * Temporarily disable KASAN bug reporting to allow instrumented
	 * spinlock functions to access aux_lock, which resides inside of a
	 * redzone.
	 */
	kasan_disable_current();
	raw_spin_lock_irqsave(&alloc_meta->aux_lock, flags);
	old_handle = alloc_meta->aux_stack[1];
	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
	alloc_meta->aux_stack[0] = new_handle;
	raw_spin_unlock_irqrestore(&alloc_meta->aux_lock, flags);
	kasan_enable_current();

	stack_depot_put(old_handle);
}
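
/*
 * Note (illustrative, editor's addition): the two aux stacks act as a small
 * FIFO of "extra" call sites related to the object (for example, where it
 * was handed to an asynchronous mechanism); recording a new one pushes out
 * the oldest handle, which is then released back to the stack depot.
 */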

void kasan_record_aux_stack(void *addr)
{
	return __kasan_record_aux_stack(addr,
			STACK_DEPOT_FLAG_CAN_ALLOC | STACK_DEPOT_FLAG_GET);
}

void kasan_record_aux_stack_noalloc(void *addr)
{
	return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_GET);
}

void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	/* Evict previous stack traces (might exist for krealloc or mempool). */
	release_alloc_meta(alloc_meta);

	kasan_save_track(&alloc_meta->alloc_track, flags);
}

void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
	struct kasan_free_meta *free_meta;

	free_meta = kasan_get_free_meta(cache, object);
	if (!free_meta)
		return;

	/* Evict previous stack trace (might exist for mempool). */
	release_free_meta(object, free_meta);

	kasan_save_track(&free_meta->free_track, 0);

	/* Mark free meta as valid. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE_META;
}