// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/irq_work.h>
#include <linux/jhash.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)                                                   \
        ({                                                                     \
                const bool __cond = WARN_ON(cond);                             \
                if (unlikely(__cond)) {                                        \
                        WRITE_ONCE(kfence_enabled, false);                     \
                        disabled_by_warn = true;                               \
                }                                                              \
                __cond;                                                        \
        })

/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;
static bool disabled_by_warn __read_mostly;

unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int kfence_enable_late(void);
static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
        unsigned long num;
        int ret = kstrtoul(val, 0, &num);

        if (ret < 0)
                return ret;

        /* Using 0 to indicate KFENCE is disabled. */
        if (!num && READ_ONCE(kfence_enabled)) {
                pr_info("disabled\n");
                WRITE_ONCE(kfence_enabled, false);
        }

        *((unsigned long *)kp->arg) = num;

        if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
                return disabled_by_warn ? -EINVAL : kfence_enable_late();
        return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
        if (!READ_ONCE(kfence_enabled))
                return sprintf(buffer, "0\n");

        return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
        .set = param_set_sample_interval,
        .get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);

/* Pool usage threshold (in percent) above which covered allocations are skipped. */
static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);

/* Allocation burst count: number of excess KFENCE allocations per sample. */
static unsigned int kfence_burst __read_mostly;
module_param_named(burst, kfence_burst, uint, 0644);

/* If true, use a deferrable timer. */
static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
module_param_named(deferrable, kfence_deferrable, bool, 0444);

/* If true, check all canary bytes on panic. */
static bool kfence_check_on_panic __read_mostly;
module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __read_mostly;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata *kfence_metadata __read_mostly;

/*
 * If kfence_metadata is not NULL, it may be accessed by kfence_shutdown_cache().
 * So introduce kfence_metadata_init to initialize metadata, and then make
 * kfence_metadata visible after initialization is successful. This prevents
 * potential UAF or access to uninitialized metadata.
 */
static struct kfence_metadata *kfence_metadata_init __read_mostly;

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

/*
 * The static key used to gate KFENCE allocations when CONFIG_KFENCE_STATIC_KEYS
 * is enabled; if static keys are not used to gate allocations, it is enabled
 * once at initialization, and merely avoids a load and compare if KFENCE is
 * disabled.
 */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/*
 * A Counting Bloom filter of allocation coverage: limits currently covered
 * allocations of the same source filling up the pool.
 *
 * Assuming a range of 15%-85% unique allocations in the pool at any point in
 * time, the below parameters provide a probability of 0.02-0.33 for false
 * positive hits respectively:
 *
 *      P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
 */
#define ALLOC_COVERED_HNUM      2
#define ALLOC_COVERED_ORDER     (const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
#define ALLOC_COVERED_SIZE      (1 << ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_HNEXT(h)  hash_32(h, ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_MASK      (ALLOC_COVERED_SIZE - 1)
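/*
 * For example, with CONFIG_KFENCE_NUM_OBJECTS=255 this gives
 * ALLOC_COVERED_ORDER = const_ilog2(255) + 2 = 9, i.e. a table of 512
 * counters -- roughly twice the number of pool objects.
 */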
static atomic_t alloc_covered[ALLOC_COVERED_SIZE];

/* Stack depth used to determine uniqueness of an allocation. */
#define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)

/*
 * Randomness for stack hashes, making it less likely that the same hash
 * collisions occur across reboots and on different machines.
 */
static u32 stack_hash_seed __ro_after_init;

/* Statistics counters for debugfs. */
enum kfence_counter_id {
        KFENCE_COUNTER_ALLOCATED,
        KFENCE_COUNTER_ALLOCS,
        KFENCE_COUNTER_FREES,
        KFENCE_COUNTER_ZOMBIES,
        KFENCE_COUNTER_BUGS,
        KFENCE_COUNTER_SKIP_INCOMPAT,
        KFENCE_COUNTER_SKIP_CAPACITY,
        KFENCE_COUNTER_SKIP_COVERED,
        KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
        [KFENCE_COUNTER_ALLOCATED]      = "currently allocated",
        [KFENCE_COUNTER_ALLOCS]         = "total allocations",
        [KFENCE_COUNTER_FREES]          = "total frees",
        [KFENCE_COUNTER_ZOMBIES]        = "zombie allocations",
        [KFENCE_COUNTER_BUGS]           = "total bugs",
        [KFENCE_COUNTER_SKIP_INCOMPAT]  = "skipped allocations (incompatible)",
        [KFENCE_COUNTER_SKIP_CAPACITY]  = "skipped allocations (capacity)",
        [KFENCE_COUNTER_SKIP_COVERED]   = "skipped allocations (covered)",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static inline bool should_skip_covered(void)
{
        unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;

        return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
}

static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
{
        num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
        num_entries = filter_irq_stacks(stack_entries, num_entries);
        return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
}

/*
 * Adds (or subtracts) count @val for allocation stack trace hash
 * @alloc_stack_hash from Counting Bloom filter.
 */
static void alloc_covered_add(u32 alloc_stack_hash, int val)
{
        int i;

        for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
                atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
                alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
        }
}

/*
 * Returns true if the allocation stack trace hash @alloc_stack_hash is
 * currently contained (non-zero count) in Counting Bloom filter.
 */
static bool alloc_covered_contains(u32 alloc_stack_hash)
{
        int i;

        for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
                if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
                        return false;
                alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
        }

        return true;
}

static bool kfence_protect(unsigned long addr)
{
        return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
        return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}

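/*
 * Pool layout: two guard pages at the start of the pool, then one object page
 * followed by one guard page per object. Object i's data page therefore
 * starts at offset (i + 1) * 2 * PAGE_SIZE from __kfence_pool, which is the
 * relation metadata_to_pageaddr() below relies on.
 */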
static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
        unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
        unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

        /* The checks do not affect performance; only called from slow-paths. */

        /* Only call with a pointer into kfence_metadata. */
        if (KFENCE_WARN_ON(meta < kfence_metadata ||
                           meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
                return 0;

        /*
         * This metadata object only ever maps to 1 page; verify that the stored
         * address is in the expected range.
         */
        if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
                return 0;

        return pageaddr;
}

static inline bool kfence_obj_allocated(const struct kfence_metadata *meta)
{
        enum kfence_object_state state = READ_ONCE(meta->state);

        return state == KFENCE_OBJECT_ALLOCATED || state == KFENCE_OBJECT_RCU_FREEING;
}

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void
metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
                      unsigned long *stack_entries, size_t num_stack_entries)
{
        struct kfence_track *track =
                next == KFENCE_OBJECT_ALLOCATED ? &meta->alloc_track : &meta->free_track;

        lockdep_assert_held(&meta->lock);

        /* The stack was already saved when RCU freeing began; skip. */
        if (READ_ONCE(meta->state) == KFENCE_OBJECT_RCU_FREEING)
                goto out;

        if (stack_entries) {
                memcpy(track->stack_entries, stack_entries,
                       num_stack_entries * sizeof(stack_entries[0]));
        } else {
                /*
                 * Skip over 1 (this) function; noinline ensures we do not
                 * accidentally skip over the caller by never inlining.
                 */
                num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
        }
        track->num_stack_entries = num_stack_entries;
        track->pid = task_pid_nr(current);
        track->cpu = raw_smp_processor_id();
        track->ts_nsec = local_clock(); /* Same source as printk timestamps. */

out:
        /*
         * Pairs with READ_ONCE() in
         *      kfence_shutdown_cache(),
         *      kfence_handle_page_fault().
         */
        WRITE_ONCE(meta->state, next);
}

#ifdef CONFIG_KMSAN
#define check_canary_attributes noinline __no_kmsan_checks
#else
#define check_canary_attributes inline
#endif

/* Check canary byte at @addr. */
static check_canary_attributes bool check_canary_byte(u8 *addr)
{
        struct kfence_metadata *meta;
        unsigned long flags;

        if (likely(*addr == KFENCE_CANARY_PATTERN_U8(addr)))
                return true;

        atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

        meta = addr_to_metadata((unsigned long)addr);
        raw_spin_lock_irqsave(&meta->lock, flags);
        kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
        raw_spin_unlock_irqrestore(&meta->lock, flags);

        return false;
}

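/*
 * The canary bytes fill the parts of the object page not occupied by the
 * object itself, on both sides of it. Out-of-bounds writes that stay within
 * the object page -- and thus never touch a protected guard page -- are then
 * still detected when the canary is checked on free (or on panic, if
 * check_on_panic is set).
 */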
static inline void set_canary(const struct kfence_metadata *meta)
{
        const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
        unsigned long addr = pageaddr;

        /*
         * The canary may overwrite bytes at the edges of the object (within
         * the u64 words straddling its boundaries); this is harmless, as the
         * user is expected to initialize the object before using it.
         */
        for (; addr < meta->addr; addr += sizeof(u64))
                *((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;

        addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64));
        for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64))
                *((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
}

static check_canary_attributes void
check_canary(const struct kfence_metadata *meta)
{
        const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
        unsigned long addr = pageaddr;

        /*
         * We'll iterate over each canary byte per-side until a corrupted byte
         * is found. However, we'll still iterate over the canary bytes to the
         * right of the object even if there was an error in the canary bytes to
         * the left of the object. Specifically, if check_canary_byte()
         * generates an error, showing both sides might give more clues as to
         * what the error is about when displaying which bytes were corrupted.
         */

        /* Apply to left of object. */
        for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
                if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64))
                        break;
        }

        /*
         * If a corrupted u64 word was found above, or if fewer than sizeof(u64)
         * canary bytes remain before the object, check the remaining bytes
         * individually.
         */
        for (; addr < meta->addr; addr++) {
                if (unlikely(!check_canary_byte((u8 *)addr)))
                        break;
        }

        /* Apply to right of object. */
        for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) {
                if (unlikely(!check_canary_byte((u8 *)addr)))
                        return;
        }
        for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) {
                if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) {

                        for (; addr - pageaddr < PAGE_SIZE; addr++) {
                                if (!check_canary_byte((u8 *)addr))
                                        return;
                        }
                }
        }
}

static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
                                  unsigned long *stack_entries, size_t num_stack_entries,
                                  u32 alloc_stack_hash)
{
        struct kfence_metadata *meta = NULL;
        unsigned long flags;
        struct slab *slab;
        void *addr;
        const bool random_right_allocate = get_random_u32_below(2);
        const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
                                  !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS);

        /* Try to obtain a free object. */
        raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
        if (!list_empty(&kfence_freelist)) {
                meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
                list_del_init(&meta->list);
        }
        raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
        if (!meta) {
                atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
                return NULL;
        }

        if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
                /*
                 * This is extremely unlikely -- we are reporting on a
                 * use-after-free, which locked meta->lock, and the reporting
                 * code via printk calls kmalloc() which ends up in
                 * kfence_alloc() and tries to grab the same object that we're
                 * reporting on. While it has never been observed, lockdep does
                 * report that there is a possibility of deadlock. Fix it by
                 * using trylock and bailing out gracefully.
                 */
                raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
                /* Put the object back on the freelist. */
                list_add_tail(&meta->list, &kfence_freelist);
                raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

                return NULL;
        }

        meta->addr = metadata_to_pageaddr(meta);
        /* Unprotect if we're reusing this page. */
        if (meta->state == KFENCE_OBJECT_FREED)
                kfence_unprotect(meta->addr);

        /*
         * Note: for allocations made before RNG initialization,
         * get_random_u32_below() will always return zero. We still benefit
         * from enabling KFENCE as early as possible, even when the RNG is not
         * yet available, as this will allow KFENCE to detect bugs due to
         * earlier allocations. The only downside is that the out-of-bounds
         * accesses detected are deterministic for such allocations.
         */
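        /*
         * Allocating at the right end of the page places the end of the object
         * close to the following guard page, so overflows past the object are
         * likely to fault; left-aligned allocations conversely catch underflows
         * via the guard page preceding the object page.
         */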
        if (random_right_allocate) {
                /* Allocate on the "right" side, re-calculate address. */
                meta->addr += PAGE_SIZE - size;
                meta->addr = ALIGN_DOWN(meta->addr, cache->align);
        }

        addr = (void *)meta->addr;

        /* Update remaining metadata. */
        metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
        /* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
        WRITE_ONCE(meta->cache, cache);
        meta->size = size;
        meta->alloc_stack_hash = alloc_stack_hash;
        raw_spin_unlock_irqrestore(&meta->lock, flags);

        alloc_covered_add(alloc_stack_hash, 1);

        /* Set required slab fields. */
        slab = virt_to_slab((void *)meta->addr);
        slab->slab_cache = cache;
        slab->objects = 1;

        /* Memory initialization. */
        set_canary(meta);

        /*
         * We check slab_want_init_on_alloc() ourselves, rather than letting
         * SL*B do the initialization, as otherwise we might overwrite KFENCE's
         * redzone.
         */
        if (unlikely(slab_want_init_on_alloc(gfp, cache)))
                memzero_explicit(addr, size);
        if (cache->ctor)
                cache->ctor(addr);

        if (random_fault)
                kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

        atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
        atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

        return addr;
}

static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
        struct kcsan_scoped_access assert_page_exclusive;
        unsigned long flags;
        bool init;

        raw_spin_lock_irqsave(&meta->lock, flags);

        if (!kfence_obj_allocated(meta) || meta->addr != (unsigned long)addr) {
                /* Invalid or double-free, bail out. */
                atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
                kfence_report_error((unsigned long)addr, false, NULL, meta,
                                    KFENCE_ERROR_INVALID_FREE);
                raw_spin_unlock_irqrestore(&meta->lock, flags);
                return;
        }

        /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
        kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
                                  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
                                  &assert_page_exclusive);

        if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
                kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

        /* Restore page protection if there was an OOB access. */
        if (meta->unprotected_page) {
                memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
                kfence_protect(meta->unprotected_page);
                meta->unprotected_page = 0;
        }

        /* Mark the object as freed. */
        metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
        init = slab_want_init_on_free(meta->cache);
        raw_spin_unlock_irqrestore(&meta->lock, flags);

        alloc_covered_add(meta->alloc_stack_hash, -1);

        /* Check canary bytes for memory corruption. */
        check_canary(meta);

        /*
         * Clear memory if init-on-free is set. While we protect the page, the
         * data is still there, and after a use-after-free is detected, we
         * unprotect the page, so the data is still accessible.
         */
        if (!zombie && unlikely(init))
                memzero_explicit(addr, meta->size);

        /* Protect to detect use-after-frees. */
        kfence_protect((unsigned long)addr);

        kcsan_end_scoped_access(&assert_page_exclusive);
        if (!zombie) {
                /* Add it to the tail of the freelist for reuse. */
                raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
                KFENCE_WARN_ON(!list_empty(&meta->list));
                list_add_tail(&meta->list, &kfence_freelist);
                raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

                atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
                atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
        } else {
                /* See kfence_shutdown_cache(). */
                atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
        }
}

static void rcu_guarded_free(struct rcu_head *h)
{
        struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

        kfence_guarded_free((void *)meta->addr, meta, false);
}

/*
 * Initialization of the KFENCE pool after its allocation.
 * Returns 0 on success; otherwise returns the address up to
 * which partial initialization succeeded.
 */
static unsigned long kfence_init_pool(void)
{
        unsigned long addr;
        struct page *pages;
        int i;

        if (!arch_kfence_init_pool())
                return (unsigned long)__kfence_pool;

        addr = (unsigned long)__kfence_pool;
        pages = virt_to_page(__kfence_pool);

        /*
         * Set up object pages: they must have PG_slab set, to avoid freeing
         * these as real pages.
         *
         * We also want to avoid inserting kfence_free() in the kfree()
         * fast-path in SLUB, and therefore need to ensure kfree() correctly
         * enters __slab_free() slow-path.
         */
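        /*
         * Within the pool, page 0 and all odd-indexed pages are guard pages;
         * the object pages are the even-indexed pages starting at index 2,
         * which is what the skip below selects.
         */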
        for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
                struct slab *slab = page_slab(nth_page(pages, i));

                if (!i || (i % 2))
                        continue;

                __folio_set_slab(slab_folio(slab));
#ifdef CONFIG_MEMCG
                slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts |
                                 MEMCG_DATA_OBJEXTS;
#endif
        }

        /*
         * Protect the first 2 pages. The first page is mostly unnecessary, and
         * merely serves as an extended guard page. However, adding one
         * additional page in the beginning gives us an even number of pages,
         * which simplifies the mapping of address to metadata index.
         */
        for (i = 0; i < 2; i++) {
                if (unlikely(!kfence_protect(addr)))
                        return addr;

                addr += PAGE_SIZE;
        }

        for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                struct kfence_metadata *meta = &kfence_metadata_init[i];

                /* Initialize metadata. */
                INIT_LIST_HEAD(&meta->list);
                raw_spin_lock_init(&meta->lock);
                meta->state = KFENCE_OBJECT_UNUSED;
                meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
                list_add_tail(&meta->list, &kfence_freelist);

                /* Protect the right redzone. */
                if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
                        goto reset_slab;

                addr += 2 * PAGE_SIZE;
        }

        /*
         * Make kfence_metadata visible only when initialization is successful.
         * Otherwise, if the initialization fails and kfence_metadata is freed,
         * it may cause UAF in kfence_shutdown_cache().
         */
        smp_store_release(&kfence_metadata, kfence_metadata_init);
        return 0;

reset_slab:
        for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
                struct slab *slab = page_slab(nth_page(pages, i));

                if (!i || (i % 2))
                        continue;
#ifdef CONFIG_MEMCG
                slab->obj_exts = 0;
#endif
                __folio_clear_slab(slab_folio(slab));
        }

        return addr;
}

static bool __init kfence_init_pool_early(void)
{
        unsigned long addr;

        if (!__kfence_pool)
                return false;

        addr = kfence_init_pool();

        if (!addr) {
                /*
                 * The pool is live and will never be deallocated from this point on.
                 * Ignore the pool object from the kmemleak phys object tree, as it would
                 * otherwise overlap with allocations returned by kfence_alloc(), which
                 * are registered with kmemleak through the slab post-alloc hook.
                 */
                kmemleak_ignore_phys(__pa(__kfence_pool));
                return true;
        }

        /*
         * Only release unprotected pages, and do not try to go back and change
         * page attributes due to risk of failing to do so as well. If changing
         * page attributes for some pages fails, it is very likely that it also
         * fails for the first page, and therefore expect addr==__kfence_pool in
         * most failure cases.
         */
        memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
        __kfence_pool = NULL;

        memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE);
        kfence_metadata_init = NULL;

        return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
        int i;

        seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
        for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
                seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
        if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
                return (void *)((long)*pos + 1);
        return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
                return (void *)((long)*pos + 1);
        return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
        struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
        unsigned long flags;

        raw_spin_lock_irqsave(&meta->lock, flags);
        kfence_print_object(seq, meta);
        raw_spin_unlock_irqrestore(&meta->lock, flags);
        seq_puts(seq, "---------------------------------\n");

        return 0;
}

static const struct seq_operations objects_sops = {
        .start = start_object,
        .next = next_object,
        .stop = stop_object,
        .show = show_object,
};
DEFINE_SEQ_ATTRIBUTE(objects);

static int kfence_debugfs_init(void)
{
        struct dentry *kfence_dir;

        if (!READ_ONCE(kfence_enabled))
                return 0;

        kfence_dir = debugfs_create_dir("kfence", NULL);
        debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
        debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
        return 0;
}

late_initcall(kfence_debugfs_init);

/* === Panic Notifier ====================================================== */

static void kfence_check_all_canary(void)
{
        int i;

        for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                struct kfence_metadata *meta = &kfence_metadata[i];

                if (kfence_obj_allocated(meta))
                        check_canary(meta);
        }
}

static int kfence_check_canary_callback(struct notifier_block *nb,
                                        unsigned long reason, void *arg)
{
        kfence_check_all_canary();
        return NOTIFY_OK;
}

static struct notifier_block kfence_check_canary_notifier = {
        .notifier_call = kfence_check_canary_callback,
};

/* === Allocation Gate Timer ================================================ */

static struct delayed_work kfence_timer;

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
        wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static void toggle_allocation_gate(struct work_struct *work)
{
        if (!READ_ONCE(kfence_enabled))
                return;

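        /*
         * Setting the gate to -kfence_burst lets 1 + kfence_burst allocations
         * pass the atomic_inc_return() check in __kfence_alloc() during this
         * sample period.
         */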
        atomic_set(&kfence_allocation_gate, -kfence_burst);
#ifdef CONFIG_KFENCE_STATIC_KEYS
        /* Enable static key, and await allocation to happen. */
        static_branch_enable(&kfence_allocation_key);

        wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate) > 0);

        /* Disable static key and reset timer. */
        static_branch_disable(&kfence_allocation_key);
#endif
        queue_delayed_work(system_unbound_wq, &kfence_timer,
                           msecs_to_jiffies(kfence_sample_interval));
}

/* === Public interface ===================================================== */

void __init kfence_alloc_pool_and_metadata(void)
{
        if (!kfence_sample_interval)
                return;

        /*
         * If the pool has already been initialized by arch, there is no need to
         * re-allocate the memory pool.
         */
        if (!__kfence_pool)
                __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

        if (!__kfence_pool) {
                pr_err("failed to allocate pool\n");
                return;
        }

        /* The memory allocated by memblock has been zeroed out. */
        kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE);
        if (!kfence_metadata_init) {
                pr_err("failed to allocate metadata\n");
                memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
                __kfence_pool = NULL;
        }
}

static void kfence_init_enable(void)
{
        if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
                static_branch_enable(&kfence_allocation_key);

        if (kfence_deferrable)
                INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
        else
                INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate);

        if (kfence_check_on_panic)
                atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);

        WRITE_ONCE(kfence_enabled, true);
        queue_delayed_work(system_unbound_wq, &kfence_timer, 0);

        pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
                CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
                (void *)(__kfence_pool + KFENCE_POOL_SIZE));
}

void __init kfence_init(void)
{
        stack_hash_seed = get_random_u32();

        /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
        if (!kfence_sample_interval)
                return;

        if (!kfence_init_pool_early()) {
                pr_err("%s failed\n", __func__);
                return;
        }

        kfence_init_enable();
}

static int kfence_init_late(void)
{
        const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE;
        const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
        unsigned long addr = (unsigned long)__kfence_pool;
        unsigned long free_size = KFENCE_POOL_SIZE;
        int err = -ENOMEM;

#ifdef CONFIG_CONTIG_ALLOC
        struct page *pages;

        pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node,
                                   NULL);
        if (!pages)
                return -ENOMEM;

        __kfence_pool = page_to_virt(pages);
        pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node,
                                   NULL);
        if (pages)
                kfence_metadata_init = page_to_virt(pages);
#else
        if (nr_pages_pool > MAX_ORDER_NR_PAGES ||
            nr_pages_meta > MAX_ORDER_NR_PAGES) {
                pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
                return -EINVAL;
        }

        __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
        if (!__kfence_pool)
                return -ENOMEM;

        kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL);
#endif

        if (!kfence_metadata_init)
                goto free_pool;

        memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE);
        addr = kfence_init_pool();
        if (!addr) {
                kfence_init_enable();
                kfence_debugfs_init();
                return 0;
        }

        pr_err("%s failed\n", __func__);
        free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
        err = -EBUSY;

#ifdef CONFIG_CONTIG_ALLOC
        free_contig_range(page_to_pfn(virt_to_page((void *)kfence_metadata_init)),
                          nr_pages_meta);
free_pool:
        free_contig_range(page_to_pfn(virt_to_page((void *)addr)),
                          free_size / PAGE_SIZE);
#else
        free_pages_exact((void *)kfence_metadata_init, KFENCE_METADATA_SIZE);
free_pool:
        free_pages_exact((void *)addr, free_size);
#endif

        kfence_metadata_init = NULL;
        __kfence_pool = NULL;
        return err;
}

static int kfence_enable_late(void)
{
        if (!__kfence_pool)
                return kfence_init_late();

        WRITE_ONCE(kfence_enabled, true);
        queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
        pr_info("re-enabled\n");
        return 0;
}

void kfence_shutdown_cache(struct kmem_cache *s)
{
        unsigned long flags;
        struct kfence_metadata *meta;
        int i;

        /* Pairs with release in kfence_init_pool(). */
        if (!smp_load_acquire(&kfence_metadata))
                return;

        for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                bool in_use;

                meta = &kfence_metadata[i];

                /*
                 * If we observe some inconsistent cache and state pair where
                 * this object should have been skipped here, cache destruction
                 * is racing with either kmem_cache_alloc() or
                 * kmem_cache_free(). Taking the lock will not help, as
                 * different critical section serialization will have the same
                 * outcome.
                 */
                if (READ_ONCE(meta->cache) != s || !kfence_obj_allocated(meta))
                        continue;

                raw_spin_lock_irqsave(&meta->lock, flags);
                in_use = meta->cache == s && kfence_obj_allocated(meta);
                raw_spin_unlock_irqrestore(&meta->lock, flags);

                if (in_use) {
                        /*
                         * This cache still has allocations, and we should not
                         * release them back into the freelist so they can still
                         * safely be used and retain the kernel's default
                         * behaviour of keeping the allocations alive (leak the
                         * cache); however, they effectively become "zombie
                         * allocations" as the KFENCE objects are the only ones
                         * still in use and the owning cache is being destroyed.
                         *
                         * We mark them freed, so that any subsequent use shows
                         * more useful error messages that will include stack
                         * traces of the user of the object, the original
                         * allocation, and caller to shutdown_cache().
                         */
                        kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
                }
        }

        for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                meta = &kfence_metadata[i];

                /* See above. */
                if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
                        continue;

                raw_spin_lock_irqsave(&meta->lock, flags);
                if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
                        meta->cache = NULL;
                raw_spin_unlock_irqrestore(&meta->lock, flags);
        }
}

void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
        unsigned long stack_entries[KFENCE_STACK_DEPTH];
        size_t num_stack_entries;
        u32 alloc_stack_hash;
        int allocation_gate;

        /*
         * Perform size check before switching kfence_allocation_gate, so that
         * we don't disable KFENCE without making an allocation.
         */
        if (size > PAGE_SIZE) {
                atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
                return NULL;
        }

        /*
         * Skip allocations from non-default zones, including DMA. We cannot
         * guarantee that pages in the KFENCE pool will have the requested
         * properties (e.g. reside in DMAable memory).
         */
        if ((flags & GFP_ZONEMASK) ||
            ((flags & __GFP_THISNODE) && num_online_nodes() > 1) ||
            (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
                atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
                return NULL;
        }

        /*
         * Skip allocations for this slab, if KFENCE has been disabled for
         * this slab.
         */
        if (s->flags & SLAB_SKIP_KFENCE)
                return NULL;

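        /*
         * Only the first 1 + kfence_burst allocations of a sample period
         * observe a value <= 1 here; see toggle_allocation_gate().
         */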
        allocation_gate = atomic_inc_return(&kfence_allocation_gate);
        if (allocation_gate > 1)
                return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
        /*
         * waitqueue_active() is fully ordered after the update of
         * kfence_allocation_gate per atomic_inc_return().
         */
        if (allocation_gate == 1 && waitqueue_active(&allocation_wait)) {
                /*
                 * Calling wake_up() here may deadlock when allocations happen
                 * from within timer code. Use an irq_work to defer it.
                 */
                irq_work_queue(&wake_up_kfence_timer_work);
        }
#endif

        if (!READ_ONCE(kfence_enabled))
                return NULL;

        num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);

        /*
         * Do expensive check for coverage of allocation in slow-path after
         * allocation_gate has already become non-zero, even though it might
         * mean not making any allocation within a given sample interval.
         *
         * This ensures reasonable allocation coverage when the pool is almost
         * full, including avoiding long-lived allocations of the same source
         * filling up the pool (e.g. pagecache allocations).
         */
        alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
        if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
                atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
                return NULL;
        }

        return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
                                    alloc_stack_hash);
}

size_t kfence_ksize(const void *addr)
{
        const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

        /*
         * Read locklessly -- if there is a race with __kfence_alloc(), this is
         * either a use-after-free or invalid access.
         */
        return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
        const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

        /*
         * Read locklessly -- if there is a race with __kfence_alloc(), this is
         * either a use-after-free or invalid access.
         */
        return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
        struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

#ifdef CONFIG_MEMCG
        KFENCE_WARN_ON(meta->obj_exts.objcg);
#endif
        /*
         * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
         * the object, as the object page may be recycled for other-typed
         * objects once it has been freed. meta->cache may be NULL if the cache
         * was destroyed.
         * Save the stack trace here so that reports show where the user freed
         * the object.
         */
        if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) {
                unsigned long flags;

                raw_spin_lock_irqsave(&meta->lock, flags);
                metadata_update_state(meta, KFENCE_OBJECT_RCU_FREEING, NULL, 0);
                raw_spin_unlock_irqrestore(&meta->lock, flags);
                call_rcu(&meta->rcu_head, rcu_guarded_free);
        } else {
                kfence_guarded_free(addr, meta, false);
        }
}

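/*
 * A faulting address inside a guard page lies between two object pages; the
 * OOB report below is attributed to whichever neighbouring allocated object
 * is closer to the faulting address.
 */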
bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
        const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
        struct kfence_metadata *to_report = NULL;
        enum kfence_error_type error_type;
        unsigned long flags;

        if (!is_kfence_address((void *)addr))
                return false;

        if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
                return kfence_unprotect(addr); /* ... unprotect and proceed. */

        atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

        if (page_index % 2) {
                /* This is a redzone, report a buffer overflow. */
                struct kfence_metadata *meta;
                int distance = 0;

                meta = addr_to_metadata(addr - PAGE_SIZE);
                if (meta && kfence_obj_allocated(meta)) {
                        to_report = meta;
                        /* Data race ok; distance calculation approximate. */
                        distance = addr - data_race(meta->addr + meta->size);
                }

                meta = addr_to_metadata(addr + PAGE_SIZE);
                if (meta && kfence_obj_allocated(meta)) {
                        /* Data race ok; distance calculation approximate. */
                        if (!to_report || distance > data_race(meta->addr) - addr)
                                to_report = meta;
                }

                if (!to_report)
                        goto out;

                raw_spin_lock_irqsave(&to_report->lock, flags);
                to_report->unprotected_page = addr;
                error_type = KFENCE_ERROR_OOB;

                /*
                 * If the object was freed before we took the lock we can still
                 * report this as an OOB -- the report will simply show the
                 * stacktrace of the free as well.
                 */
        } else {
                to_report = addr_to_metadata(addr);
                if (!to_report)
                        goto out;

                raw_spin_lock_irqsave(&to_report->lock, flags);
                error_type = KFENCE_ERROR_UAF;
                /*
                 * We may race with __kfence_alloc(), and it is possible that a
                 * freed object may be reallocated. We simply report this as a
                 * use-after-free, with the stack trace showing the place where
                 * the object was re-allocated.
                 */
        }

out:
        if (to_report) {
                kfence_report_error(addr, is_write, regs, to_report, error_type);
                raw_spin_unlock_irqrestore(&to_report->lock, flags);
        } else {
                /* This may be a UAF or OOB access, but we can't be sure. */
                kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
        }

        return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}