// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <[email protected]>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <[email protected]>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

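/*
 * Return the slab that backs @addr, or NULL if @addr is not a valid
 * linear-map address.
 */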
struct slab *kasan_addr_to_slab(const void *addr)
{
	if (virt_addr_valid(addr))
		return virt_to_slab(addr);
	return NULL;
}

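/*
 * Capture the current stack trace and store it in the stack depot. The
 * returned handle is later recorded in alloc/free tracks.
 */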
depot_stack_handle_t kasan_save_stack(gfp_t flags, depot_flags_t depot_flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save_flags(entries, nr_entries, flags, depot_flags);
}

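/*
 * Fill in bookkeeping information about an alloc or free event: the pid of
 * the current task and the given stack depot handle. With
 * CONFIG_KASAN_EXTRA_INFO, also record the CPU and a reduced-precision
 * timestamp (nanoseconds >> 9, i.e. roughly half-microsecond granularity).
 */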
void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack)
{
#ifdef CONFIG_KASAN_EXTRA_INFO
	u32 cpu = raw_smp_processor_id();
	u64 ts_nsec = local_clock();

	track->cpu = cpu;
	track->timestamp = ts_nsec >> 9;
#endif /* CONFIG_KASAN_EXTRA_INFO */
	track->pid = current->pid;
	track->stack = stack;
}

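/* Capture the current stack and record it in @track in one step. */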
void kasan_save_track(struct kasan_track *track, gfp_t flags)
{
	depot_stack_handle_t stack;

	stack = kasan_save_stack(flags, STACK_DEPOT_FLAG_CAN_ALLOC);
	kasan_set_track(track, stack);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_disable_current);

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

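/*
 * Mark [address, address + size) as accessible. KFENCE-managed addresses
 * are skipped, as KFENCE implements its own object protection.
 */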
void __kasan_unpoison_range(const void *address, size_t size)
{
	if (is_kfence_address(address))
		return;

	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

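/*
 * Tag and unpoison a freshly allocated (non-highmem) page range. Returns
 * true if KASAN covered the allocation, or false if it was skipped (highmem
 * pages, or allocations excluded by page_alloc sampling).
 */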
bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return false;

	if (!kasan_sample_page_alloc(order))
		return false;

	tag = kasan_random_tag();
	kasan_unpoison(set_tag(page_address(page), tag),
		       PAGE_SIZE << order, init);
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);

	return true;
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_PAGE_FREE, init);
}

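/*
 * Reset the page tags and poison a new slab's entire memory as redzone
 * before any objects are carved out of it.
 */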
void __kasan_poison_slab(struct slab *slab)
{
	struct page *page = slab_page(slab);
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_SLAB_REDZONE, false);
}

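/*
 * The two helpers below temporarily unpoison and re-poison a slab object
 * for the slab allocator's internal use, e.g. while the allocator itself
 * needs to access the object's memory directly.
 */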
void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
			    const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/*
	 * For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU,
	 * assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	/* Initialize per-object metadata if it is present. */
	if (kasan_requires_meta())
		kasan_init_object_meta(cache, object);

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

/* Returns true when freeing the object is not safe. */
static bool check_slab_allocation(struct kmem_cache *cache, void *object,
				  unsigned long ip)
{
	void *tagged_object = object;

	object = kasan_reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	return false;
}

static inline void poison_slab_object(struct kmem_cache *cache, void *object,
				      bool init, bool still_accessible)
{
	void *tagged_object = object;

	object = kasan_reset_tag(object);

	/* RCU slabs could be legally used after free within the RCU period. */
	if (unlikely(still_accessible))
		return;

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
			KASAN_SLAB_FREE, init);

	if (kasan_stack_collection_enabled())
		kasan_save_free_info(cache, tagged_object);
}

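/*
 * Sanity-check an impending slab free. Returns true when freeing the object
 * is not safe (an invalid or double free); a report has been produced in
 * that case.
 */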
bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
			   unsigned long ip)
{
	if (!kasan_arch_is_ready() || is_kfence_address(object))
		return false;
	return check_slab_allocation(cache, object, ip);
}

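/*
 * Poison a freed slab object. Returns true if KASAN has taken ownership of
 * the object by placing it into the quarantine; the slab allocator must
 * then skip putting the object onto the freelist.
 */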
bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
		       bool still_accessible)
{
	if (!kasan_arch_is_ready() || is_kfence_address(object))
		return false;

	poison_slab_object(cache, object, init, still_accessible);

	/*
	 * If the object is put into quarantine, do not let slab put the object
	 * onto the freelist for now. The object's metadata is kept until the
	 * object gets evicted from quarantine.
	 */
	if (kasan_quarantine_put(cache, object))
		return true;

	/*
	 * Note: Keep per-object metadata to allow KASAN to print stack traces
	 * for use-after-free-before-realloc bugs.
	 */

	/* Let slab put the object onto the freelist. */
	return false;
}

static inline bool check_page_allocation(void *ptr, unsigned long ip)
{
	if (!kasan_arch_is_ready())
		return false;

	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE);
		return true;
	}

	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	check_page_allocation(ptr, ip);

	/* The object will be poisoned by kasan_poison_pages(). */
}

static inline void unpoison_slab_object(struct kmem_cache *cache, void *object,
					gfp_t flags, bool init)
{
	/*
	 * Unpoison the whole object. For kmalloc() allocations,
	 * poison_kmalloc_redzone() will do precise poisoning.
	 */
	kasan_unpoison(object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled() && !is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, object, flags);
}

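/*
 * Hook for a successful slab allocation: assign a tag, unpoison the object,
 * and save the allocation stack. Returns the pointer the slab allocator
 * must hand out (tagged under the tag-based modes).
 */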
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
					void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign random tag for tag-based modes.
	 * Tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
	unpoison_slab_object(cache, tagged_object, flags, init);

	return tagged_object;
}

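/*
 * Poison the slack between the requested kmalloc() size and the end of the
 * underlying slab object as redzone, so that accesses beyond the requested
 * size are reported.
 */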
static inline void poison_kmalloc_redzone(struct kmem_cache *cache,
				const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
				KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
			KASAN_SLAB_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled() && is_kmalloc_cache(cache))
		kasan_save_alloc_info(cache, (void *)object, flags);
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
					size_t size, gfp_t flags)
{
	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/* The object has already been unpoisoned by kasan_slab_alloc(). */
	poison_kmalloc_redzone(cache, object, size, flags);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}
EXPORT_SYMBOL(__kasan_kmalloc);

static inline void poison_kmalloc_large_redzone(const void *ptr, size_t size,
						gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size), KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);
}

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/* The object has already been unpoisoned by kasan_unpoison_pages(). */
	poison_kmalloc_large_redzone(ptr, size, flags);

	/* Keep the tag that was set by alloc_pages(). */
	return (void *)ptr;
}

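/*
 * Hook for krealloc(): unpoison up to the new size and re-poison the
 * redzone beyond it, for both slab-backed and page_alloc-backed (large)
 * kmalloc allocations.
 */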
void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct slab *slab;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	slab = virt_to_slab(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!slab))
		poison_kmalloc_large_redzone(object, size, flags);
	else
		poison_kmalloc_redzone(slab->slab_cache, object, size, flags);

	return (void *)object;
}

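/*
 * Poison a page-based mempool element as it is stashed in the pool. Returns
 * false if the free was found to be invalid (and a report was produced),
 * true otherwise.
 */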
bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip)
{
	unsigned long *ptr;

	if (unlikely(PageHighMem(page)))
		return true;

	/* Bail out if allocation was excluded due to sampling. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    page_kasan_tag(page) == KASAN_TAG_KERNEL)
		return true;

	ptr = page_address(page);

	if (check_page_allocation(ptr, ip))
		return false;

	kasan_poison(ptr, PAGE_SIZE << order, KASAN_PAGE_FREE, false);

	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip)
{
	__kasan_unpoison_pages(page, order, false);
}

bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
{
	struct folio *folio = virt_to_folio(ptr);
	struct slab *slab;

	/*
	 * This function can be called for large kmalloc allocations that get
	 * their memory from page_alloc. Thus, the folio might not be a slab.
	 */
	if (unlikely(!folio_test_slab(folio))) {
		if (check_page_allocation(ptr, ip))
			return false;
		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
		return true;
	}

	if (is_kfence_address(ptr) || !kasan_arch_is_ready())
		return true;

	slab = folio_slab(folio);

	if (check_slab_allocation(slab->slab_cache, ptr, ip))
		return false;

	poison_slab_object(slab->slab_cache, ptr, false, false);
	return true;
}

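/*
 * Unpoison a mempool element as it is taken back out of the pool: the whole
 * object for regular slab caches, or @size plus a redzone for kmalloc-style
 * and large (page_alloc-backed) allocations.
 */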
void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
{
	struct slab *slab;
	gfp_t flags = 0; /* Might be executing under a lock. */

	slab = virt_to_slab(ptr);

	/*
	 * This function can be called for large kmalloc allocations that get
	 * their memory from page_alloc.
	 */
	if (unlikely(!slab)) {
		kasan_unpoison(ptr, size, false);
		poison_kmalloc_large_redzone(ptr, size, flags);
		return;
	}

	if (is_kfence_address(ptr))
		return;

	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
	unpoison_slab_object(slab->slab_cache, ptr, flags, false);

	/* Poison the redzone and save alloc info for kmalloc() allocations. */
	if (is_kmalloc_cache(slab->slab_cache))
		poison_kmalloc_redzone(slab->slab_cache, ptr, size, flags);
}

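/*
 * Check that a single byte at @address is accessible. Produces a KASAN
 * report and returns false if it is not.
 */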
bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report(address, 1, false, ip);
		return false;
	}
	return true;
}