// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending
 * on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(const void *addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow(addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = (unsigned long)addr & KASAN_GRANULE_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_2_4_8(const void *addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow(addr);

	/*
	 * Access crosses 8(shadow size)-byte boundary. Such access maps
	 * into 2 shadow bytes, so we need to check them both.
	 */
	if (unlikely((((unsigned long)addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(const void *addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow(addr);

	/* Unaligned 16-bytes access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED((unsigned long)addr, KASAN_GRANULE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}

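/*
 * memory_is_nonzero() scans the shadow word-at-a-time: an unaligned
 * prefix and the sub-word tail are checked byte by byte, everything in
 * between as u64 loads, so a large region costs roughly one load per
 * 64 bytes of checked memory.
 */
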
static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow(addr),
			kasan_mem_to_shadow(addr + size - 1) + 1);

	if (unlikely(ret)) {
		const void *last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte);
		s8 last_accessible_byte = (unsigned long)last_byte & KASAN_GRANULE_MASK;

		if (unlikely(ret != (unsigned long)last_shadow ||
			     last_accessible_byte >= *last_shadow))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(const void *addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

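/*
 * These constant-size specializations back the compiler-emitted outline
 * checks: for e.g. a 4-byte load the compiler inserts a call to
 * __asan_load4(addr), which funnels through check_region_inline() with
 * a compile-time constant size and resolves to memory_is_poisoned_2_4_8().
 */
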
static __always_inline bool check_region_inline(const void *addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (!kasan_arch_is_ready())
		return true;

	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	if (unlikely(!addr_has_metadata(addr)))
		return !kasan_report(addr, size, write, ret_ip);

	if (likely(!memory_is_poisoned(addr, size)))
		return true;

	return !kasan_report(addr, size, write, ret_ip);
}

bool kasan_check_range(const void *addr, size_t size, bool write,
				unsigned long ret_ip)
{
	return check_region_inline(addr, size, write, ret_ip);
}

bool kasan_byte_accessible(const void *addr)
{
	s8 shadow_byte;

	if (!kasan_arch_is_ready())
		return true;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

	return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
	kasan_quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		kasan_quarantine_remove_cache(cache);
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);

	kasan_unpoison(global->beg, global->size, false);

	kasan_poison(global->beg + aligned_size,
		     global->size_with_redzone - aligned_size,
		     KASAN_GLOBAL_REDZONE, false);
}

void __asan_register_globals(void *ptr, ssize_t size)
{
	int i;
	struct kasan_global *globals = ptr;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

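/*
 * For each instrumented translation unit the compiler emits a
 * constructor that hands an array of struct kasan_global descriptors to
 * __asan_register_globals(), letting register_global() above unpoison
 * each global and poison its trailing redzone.
 */
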
void __asan_unregister_globals(void *ptr, ssize_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(void *addr)				\
	{								\
		check_region_inline(addr, size, false, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(void *);			\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(void *addr)				\
	{								\
		check_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(void *);			\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

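/*
 * For reference, DEFINE_ASAN_LOAD_STORE(1) expands to (sketch):
 *
 *	void __asan_load1(void *addr)
 *	{
 *		check_region_inline(addr, 1, false, _RET_IP_);
 *	}
 *	EXPORT_SYMBOL(__asan_load1);
 *
 * plus the matching __asan_store1() and the *_noabort aliases that the
 * compiler calls from instrumented code.
 */
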
void __asan_loadN(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(void *addr, ssize_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED((unsigned long)addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison((const void *)(addr + rounded_down_size),
			size - rounded_down_size, false);
	kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_LEFT, false);
	kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_RIGHT, false);
}
EXPORT_SYMBOL(__asan_alloca_poison);

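/*
 * Resulting layout around an alloca()ed object (sketch):
 *
 *	[KASAN_ALLOCA_LEFT | object (size bytes) | padding | KASAN_ALLOCA_RIGHT]
 *
 * Only the object's size bytes stay accessible; the unused tail of the
 * object's last granule and both redzones report alloca-out-of-bounds.
 */
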
/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom)
{
	if (unlikely(!stack_top || stack_top > (void *)stack_bottom))
		return;

	kasan_unpoison(stack_top, (void *)stack_bottom - stack_top, false);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte)					\
	void __asan_set_shadow_##byte(const void *addr, ssize_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

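/*
 * These helpers let compiler-generated prologues/epilogues paint stack
 * shadow directly: 00 unpoisons, while the f1/f2/f3 values mark the
 * left/middle/right stack redzones and f5/f8 are used by the compiler
 * for use-after-return/use-after-scope style poisoning.
 */
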
/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

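/*
 * Example: a 100-byte object exceeds 128 - 32 but fits in 512 - 64, so
 * it gets a 64-byte redzone and kasan_cache_create() below targets a
 * 164-byte layout unless the metadata already pushed the size further.
 */
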
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;
	unsigned int rem_free_meta_size;
	unsigned int orig_alloc_meta_offset;

	if (!kasan_requires_meta())
		return;

	/*
	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN and
	 * that thus have per-object metadata. Currently, this flag is used in
	 * slab_ksize() to account for per-object metadata when calculating the
	 * size of the accessible memory within the object. Additionally, we use
	 * SLAB_NO_MERGE to prevent merging of caches with per-object metadata.
	 */
	*flags |= SLAB_KASAN | SLAB_NO_MERGE;

	ok_size = *size;

	/* Add alloc meta into the redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* If alloc meta doesn't fit, don't add it. */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	ok_size = *size;
	orig_alloc_meta_offset = cache->kasan_info.alloc_meta_offset;

	/*
	 * Store free meta in the redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
		goto free_meta_added;
	}

	/*
	 * Otherwise, if the object is large enough to contain free meta,
	 * store it within the object.
	 */
	if (sizeof(struct kasan_free_meta) <= cache->object_size) {
		/* cache->kasan_info.free_meta_offset = 0 is implied. */
		goto free_meta_added;
	}

	/*
	 * For smaller objects, store the beginning of free meta within the
	 * object and the end in the redzone. And thus shift the location of
	 * alloc meta to free up space for free meta.
	 * This is only possible when slub_debug is disabled, as otherwise
	 * the end of free meta will overlap with slub_debug metadata.
	 */
	if (!__slub_debug_enabled()) {
		rem_free_meta_size = sizeof(struct kasan_free_meta) -
					cache->object_size;
		*size += rem_free_meta_size;
		if (cache->kasan_info.alloc_meta_offset != 0)
			cache->kasan_info.alloc_meta_offset += rem_free_meta_size;
		goto free_meta_added;
	}

	/*
	 * If the object is small and slub_debug is enabled, store free meta
	 * in the redzone after alloc meta.
	 */
	cache->kasan_info.free_meta_offset = *size;
	*size += sizeof(struct kasan_free_meta);

free_meta_added:
	/* If free meta doesn't fit, don't add it. */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		cache->kasan_info.alloc_meta_offset = orig_alloc_meta_offset;
		*size = ok_size;
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE. */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}

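/*
 * Typical resulting layout with both metas in the redzone (sketch):
 *
 *	[object | alloc meta | free meta | rest of redzone]
 *
 * A free_meta_offset of 0 means free meta overlays the freed object
 * itself, and KASAN_NO_FREE_META means no free meta is stored at all.
 */
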
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta) {
		/* Zero out alloc meta to mark it as invalid. */
		__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	/*
	 * Explicitly marking free meta as invalid is not required: the shadow
	 * value for the first 8 bytes of a newly allocated object is not
	 * KASAN_SLAB_FREE_META.
	 */
}

static void release_alloc_meta(struct kasan_alloc_meta *meta)
{
	/* Zero out alloc meta to mark it as invalid. */
	__memset(meta, 0, sizeof(*meta));
}

static void release_free_meta(const void *object, struct kasan_free_meta *meta)
{
	if (!kasan_arch_is_ready())
		return;

	/* Check if free meta is valid. */
	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
		return;

	/* Mark free meta as invalid. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
}

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
{
	struct kasan_cache *info = &cache->kasan_info;

	if (!kasan_requires_meta())
		return 0;

	if (in_object)
		return (info->free_meta_offset ?
			0 : sizeof(struct kasan_free_meta));
	else
		return (info->alloc_meta_offset ?
			sizeof(struct kasan_alloc_meta) : 0) +
			((info->free_meta_offset &&
			  info->free_meta_offset != KASAN_NO_FREE_META) ?
			 sizeof(struct kasan_free_meta) : 0);
}

static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
{
	struct slab *slab = kasan_addr_to_slab(addr);
	struct kmem_cache *cache;
	struct kasan_alloc_meta *alloc_meta;
	void *object;

	if (is_kfence_address(addr) || !slab)
		return;

	cache = slab->slab_cache;
	object = nearest_obj(cache, slab, addr);
	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
	alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
}

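/*
 * aux_stack[0] holds the most recent auxiliary stack trace (e.g. the
 * call_rcu() or queue_work() call site) and aux_stack[1] the previous
 * one: recording shifts [0] into [1], so the two newest traces are kept
 * for the report.
 */
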
void kasan_record_aux_stack(void *addr)
{
	return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
}

void kasan_record_aux_stack_noalloc(void *addr)
{
	return __kasan_record_aux_stack(addr, 0);
}

void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	/* Invalidate previous stack traces (might exist for krealloc or mempool). */
	release_alloc_meta(alloc_meta);

	kasan_save_track(&alloc_meta->alloc_track, flags);
}

void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
	struct kasan_free_meta *free_meta;

	free_meta = kasan_get_free_meta(cache, object);
	if (!free_meta)
		return;

	/* Invalidate previous stack trace (might exist for mempool). */
	release_free_meta(object, free_meta);

	kasan_save_track(&free_meta->free_track, 0);

	/* Mark free meta as valid. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE_META;
}