// SPDX-License-Identifier: GPL-2.0
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <[email protected]>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/kfence.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>
#include <linux/stackdepot.h>

#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
		    slab_caches_to_rcu_destroy_workfn);

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_NO_MERGE)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

static int __init setup_slab_merge(char *str)
{
	slab_nomerge = false;
	return 1;
}

__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);

__setup("slab_nomerge", setup_slab_nomerge);
__setup("slab_merge", setup_slab_merge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	return 0;
}
#endif

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
static unsigned int calculate_alignment(slab_flags_t flags,
		unsigned int align, unsigned int size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned int ralign;

		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	align = max(align, arch_slab_minalign());

	return ALIGN(align, sizeof(void *));
}

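/*
 * Worked example (editor's illustration, not from the original source):
 * with a 64-byte cache line, SLAB_HWCACHE_ALIGN and a 24-byte object,
 * ralign is halved from 64 to 32 (since 24 <= 32) and the loop then stops
 * (24 > 16), so align becomes 32 before the final arch_slab_minalign()
 * clamp and the sizeof(void *) rounding. Exact results depend on
 * cache_line_size() for the running machine.
 */
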
/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (s->ctor)
		return 1;

#ifdef CONFIG_HARDENED_USERCOPY
	if (s->usersize)
		return 1;
#endif

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(flags, name);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		return s;
	}
	return NULL;
}

static struct kmem_cache *create_cache(const char *name,
		unsigned int object_size, unsigned int freeptr_offset,
		unsigned int align, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize,
		void (*ctor)(void *))
{
	struct kmem_cache *s;
	int err;

	if (WARN_ON(useroffset + usersize > object_size))
		useroffset = usersize = 0;

	/* If a custom freelist pointer is requested make sure it's sane. */
	err = -EINVAL;
	if (freeptr_offset != UINT_MAX &&
	    (freeptr_offset >= object_size || !(flags & SLAB_TYPESAFE_BY_RCU) ||
	     !IS_ALIGNED(freeptr_offset, sizeof(freeptr_t))))
		goto out;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->size = s->object_size = object_size;
	s->rcu_freeptr_offset = freeptr_offset;
	s->align = align;
	s->ctor = ctor;
#ifdef CONFIG_HARDENED_USERCOPY
	s->useroffset = useroffset;
	s->usersize = usersize;
#endif
	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
	return s;

out_free_cache:
	kmem_cache_free(kmem_cache, s);
out:
	return ERR_PTR(err);
}

static struct kmem_cache *
do_kmem_cache_create_usercopy(const char *name,
		  unsigned int size, unsigned int freeptr_offset,
		  unsigned int align, slab_flags_t flags,
		  unsigned int useroffset, unsigned int usersize,
		  void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

#ifdef CONFIG_SLUB_DEBUG
	/*
	 * If no slab_debug was enabled globally, the static key is not yet
	 * enabled by setup_slub_debug(). Enable it if the cache is being
	 * created with any of the debugging flags passed explicitly.
	 * It's also possible that this is the first cache created with
	 * SLAB_STORE_USER and we should init stack_depot for it.
	 */
	if (flags & SLAB_DEBUG_FLAGS)
		static_branch_enable(&slub_debug_enabled);
	if (flags & SLAB_STORE_USER)
		stack_depot_init();
#endif

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err)
		goto out_unlock;

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	/* Fail closed on bad usersize or useroffset values. */
	if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
	    WARN_ON(!usersize && useroffset) ||
	    WARN_ON(size < usersize || size - usersize < useroffset))
		usersize = useroffset = 0;

	if (!usersize)
		s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size, freeptr_offset,
			 calculate_alignment(flags, align, size),
			 flags, useroffset, usersize, ctor);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	if (err) {
		if (flags & SLAB_PANIC)
			panic("%s: Failed to create slab '%s'. Error %d\n",
				__func__, name, err);
		else {
			pr_warn("%s(%s) failed with error %d\n",
				__func__, name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}

/**
 * kmem_cache_create_usercopy - Create a cache with a region suitable
 * for copying to userspace
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create_usercopy(const char *name, unsigned int size,
			   unsigned int align, slab_flags_t flags,
			   unsigned int useroffset, unsigned int usersize,
			   void (*ctor)(void *))
{
	return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
					     useroffset, usersize, ctor);
}
EXPORT_SYMBOL(kmem_cache_create_usercopy);

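/*
 * Illustrative usage sketch (editor's addition; "struct foo" and the "foo"
 * cache name are hypothetical). Only the region that may legally be copied
 * to/from userspace is whitelisted:
 *
 *	struct foo {
 *		u32 flags;
 *		char payload[64];	// the only usercopy-visible region
 *		spinlock_t lock;
 *	};
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo", sizeof(struct foo),
 *				0, SLAB_HWCACHE_ALIGN,
 *				offsetof(struct foo, payload),
 *				sizeof_field(struct foo, payload), NULL);
 */
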
/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		slab_flags_t flags, void (*ctor)(void *))
{
	return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
					     0, 0, ctor);
}
EXPORT_SYMBOL(kmem_cache_create);

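/*
 * Illustrative usage sketch (editor's addition; hypothetical names):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 *	obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, obj);
 */
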
/**
 * kmem_cache_create_rcu - Create a SLAB_TYPESAFE_BY_RCU cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @freeptr_offset: The offset into the memory to the free pointer
 * @flags: SLAB flags
 *
 * Cannot be called within an interrupt, but can be interrupted.
 *
 * See kmem_cache_create() for an explanation of possible @flags.
 *
 * By default SLAB_TYPESAFE_BY_RCU caches place the free pointer outside
 * of the object. This might cause the object to grow in size. Callers
 * that have a reason to avoid this can specify a custom free pointer
 * offset in their struct where the free pointer will be placed.
 *
 * Note that placing the free pointer inside the object requires the
 * caller to ensure that no fields are invalidated that are required to
 * guard against object recycling (See SLAB_TYPESAFE_BY_RCU for
 * details.).
 *
 * Using zero as a value for @freeptr_offset is valid. To request no
 * offset UINT_MAX must be specified.
 *
 * Note that @ctor isn't supported with custom free pointers as a @ctor
 * requires an external free pointer.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *kmem_cache_create_rcu(const char *name, unsigned int size,
					 unsigned int freeptr_offset,
					 slab_flags_t flags)
{
	return do_kmem_cache_create_usercopy(name, size, freeptr_offset, 0,
					     flags | SLAB_TYPESAFE_BY_RCU, 0, 0,
					     NULL);
}
EXPORT_SYMBOL(kmem_cache_create_rcu);

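/*
 * Illustrative sketch (editor's addition; hypothetical struct): a caller
 * that wants the free pointer stored inside the object reserves a field it
 * never relies on across a grace-period-delayed reuse and passes its
 * offset, which must be aligned to sizeof(freeptr_t) and smaller than the
 * object size:
 *
 *	struct foo_rcu {
 *		refcount_t ref;		// validated by RCU readers
 *		void *freeptr;		// used only by the allocator
 *	};
 *
 *	cachep = kmem_cache_create_rcu("foo_rcu", sizeof(struct foo_rcu),
 *				       offsetof(struct foo_rcu, freeptr), 0);
 */
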
static struct kmem_cache *kmem_buckets_cache __ro_after_init;

/**
 * kmem_buckets_create - Create a set of caches that handle dynamic sized
 *			 allocations via kmem_buckets_alloc()
 * @name: A prefix string which is used in /proc/slabinfo to identify this
 *	  cache. The individual caches will have their sizes as the suffix.
 * @flags: SLAB flags (see kmem_cache_create() for details).
 * @useroffset: Starting offset within an allocation that may be copied
 *		to/from userspace.
 * @usersize: How many bytes, starting at @useroffset, may be copied
 *	      to/from userspace.
 * @ctor: A constructor for the objects, run when new allocations are made.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 *
 * Return: a pointer to the cache on success, NULL on failure. When
 * CONFIG_SLAB_BUCKETS is not enabled, ZERO_SIZE_PTR is returned, and
 * subsequent calls to kmem_buckets_alloc() will fall back to kmalloc().
 * (i.e. callers only need to check for NULL on failure.)
 */
kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
				  unsigned int useroffset,
				  unsigned int usersize,
				  void (*ctor)(void *))
{
	kmem_buckets *b;
	int idx;

	/*
	 * When the separate buckets API is not built in, just return
	 * a non-NULL value for the kmem_buckets pointer, which will be
	 * unused when performing allocations.
	 */
	if (!IS_ENABLED(CONFIG_SLAB_BUCKETS))
		return ZERO_SIZE_PTR;

	if (WARN_ON(!kmem_buckets_cache))
		return NULL;

	b = kmem_cache_alloc(kmem_buckets_cache, GFP_KERNEL|__GFP_ZERO);
	if (WARN_ON(!b))
		return NULL;

	flags |= SLAB_NO_MERGE;

	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
		char *short_size, *cache_name;
		unsigned int cache_useroffset, cache_usersize;
		unsigned int size;

		if (!kmalloc_caches[KMALLOC_NORMAL][idx])
			continue;

		size = kmalloc_caches[KMALLOC_NORMAL][idx]->object_size;
		if (!size)
			continue;

		short_size = strchr(kmalloc_caches[KMALLOC_NORMAL][idx]->name, '-');
		if (WARN_ON(!short_size))
			goto fail;

		cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
		if (WARN_ON(!cache_name))
			goto fail;

		if (useroffset >= size) {
			cache_useroffset = 0;
			cache_usersize = 0;
		} else {
			cache_useroffset = useroffset;
			cache_usersize = min(size - cache_useroffset, usersize);
		}
		(*b)[idx] = kmem_cache_create_usercopy(cache_name, size,
					0, flags, cache_useroffset,
					cache_usersize, ctor);
		kfree(cache_name);
		if (WARN_ON(!(*b)[idx]))
			goto fail;
	}

	return b;

fail:
	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++)
		kmem_cache_destroy((*b)[idx]);
	kfree(b);

	return NULL;
}
EXPORT_SYMBOL(kmem_buckets_create);

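/*
 * Illustrative usage sketch (editor's addition; hypothetical names): a
 * subsystem that allocates attacker-controlled sizes can isolate those
 * allocations from the shared kmalloc caches:
 *
 *	static kmem_buckets *foo_buckets;
 *
 *	foo_buckets = kmem_buckets_create("foo", 0, 0, 0, NULL);
 *	buf = kmem_buckets_alloc(foo_buckets, len, GFP_KERNEL);
 *	...
 *	kfree(buf);
 *
 * As noted above, kmem_buckets_alloc() simply behaves like kmalloc() when
 * CONFIG_SLAB_BUCKETS is not enabled.
 */
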
#ifdef SLAB_SUPPORTS_SYSFS
/*
 * For a given kmem_cache, kmem_cache_destroy() should only be called
 * once or there will be a use-after-free problem. The actual deletion
 * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
 * protection. So they are now done without holding those locks.
 *
 * Note that there will be a slight delay in the deletion of sysfs files
 * if kmem_cache_release() is called indirectly from a work function.
 */
static void kmem_cache_release(struct kmem_cache *s)
{
	if (slab_state >= FULL) {
		sysfs_slab_unlink(s);
		sysfs_slab_release(s);
	} else {
		slab_kmem_cache_release(s);
	}
}
#else
static void kmem_cache_release(struct kmem_cache *s)
{
	slab_kmem_cache_release(s);
}
#endif

static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
	LIST_HEAD(to_destroy);
	struct kmem_cache *s, *s2;

	/*
	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
	 * @slab_caches_to_rcu_destroy list. The slab pages are freed
	 * through RCU and the associated kmem_cache is dereferenced
	 * while freeing the pages, so the kmem_caches should be freed only
	 * after the pending RCU operations are finished. As rcu_barrier()
	 * is a pretty slow operation, we batch all pending destructions
	 * asynchronously.
	 */
	mutex_lock(&slab_mutex);
	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
	mutex_unlock(&slab_mutex);

	if (list_empty(&to_destroy))
		return;

	rcu_barrier();

	list_for_each_entry_safe(s, s2, &to_destroy, list) {
		debugfs_slab_release(s);
		kfence_shutdown_cache(s);
		kmem_cache_release(s);
	}
}

static int shutdown_cache(struct kmem_cache *s)
{
	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	list_del(&s->list);

	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
		schedule_work(&slab_caches_to_rcu_destroy_work);
	} else {
		kfence_shutdown_cache(s);
		debugfs_slab_release(s);
	}

	return 0;
}

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	int err = -EBUSY;
	bool rcu_set;

	if (unlikely(!s) || !kasan_check_byte(s))
		return;

	cpus_read_lock();
	mutex_lock(&slab_mutex);

	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	err = shutdown_cache(s);
	WARN(err, "%s %s: Slab cache still has objects when called from %pS",
	     __func__, s->name, (void *)_RET_IP_);
out_unlock:
	mutex_unlock(&slab_mutex);
	cpus_read_unlock();
	if (!err && !rcu_set)
		kmem_cache_release(s);
}
EXPORT_SYMBOL(kmem_cache_destroy);

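/*
 * Illustrative teardown sketch (editor's addition; hypothetical names):
 * every object must be returned to the cache before the cache itself is
 * destroyed, typically in a module's exit path:
 *
 *	kmem_cache_free(foo_cachep, obj);	// for each outstanding object
 *	kmem_cache_destroy(foo_cachep);
 *	foo_cachep = NULL;
 *
 * Calling kmem_cache_destroy() twice on the same cache is a bug; see the
 * comment above kmem_cache_release().
 */
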
/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 *
 * Return: %0 if all slabs were released, non-zero otherwise
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	kasan_cache_shrink(cachep);

	return __kmem_cache_shrink(cachep);
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifdef CONFIG_PRINTK
static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	if (__kfence_obj_info(kpp, object, slab))
		return;
	__kmem_obj_info(kpp, object, slab);
}

/**
 * kmem_dump_obj - Print available slab provenance information
 * @object: slab object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate. The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For a slab-cache object, the fact that it is a slab object is printed,
 * and, if available, the slab name, return address, and stack trace from
 * the allocation and last free path of that object.
 *
 * Return: %true if the pointer is to a not-yet-freed object from
 * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
 * is to an already-freed object, and %false otherwise.
 */
bool kmem_dump_obj(void *object)
{
	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
	int i;
	struct slab *slab;
	unsigned long ptroffset;
	struct kmem_obj_info kp = { };

	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
		return false;
	slab = virt_to_slab(object);
	if (!slab)
		return false;

	kmem_obj_info(&kp, object, slab);
	if (kp.kp_slab_cache)
		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
	else
		pr_cont(" slab%s", cp);
	if (is_kfence_address(object))
		pr_cont(" (kfence)");
	if (kp.kp_objp)
		pr_cont(" start %px", kp.kp_objp);
	if (kp.kp_data_offset)
		pr_cont(" data offset %lu", kp.kp_data_offset);
	if (kp.kp_objp) {
		ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
		pr_cont(" pointer offset %lu", ptroffset);
	}
	if (kp.kp_slab_cache && kp.kp_slab_cache->object_size)
		pr_cont(" size %u", kp.kp_slab_cache->object_size);
	if (kp.kp_ret)
		pr_cont(" allocated at %pS\n", kp.kp_ret);
	else
		pr_cont("\n");
	for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
		if (!kp.kp_stack[i])
			break;
		pr_info(" %pS\n", kp.kp_stack[i]);
	}

	if (kp.kp_free_stack[0])
		pr_cont(" Free path:\n");

	for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
		if (!kp.kp_free_stack[i])
			break;
		pr_info(" %pS\n", kp.kp_free_stack[i]);
	}

	return true;
}
EXPORT_SYMBOL_GPL(kmem_dump_obj);
#endif

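/*
 * Illustrative caller sketch (editor's addition; hypothetical context):
 * because the output is emitted with pr_cont(), a debugging site prints
 * its own preamble first:
 *
 *	pr_info("suspect pointer %px:", ptr);
 *	if (!kmem_dump_obj(ptr))
 *		pr_cont(" (not a slab object)\n");
 */
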
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	int err;
	unsigned int align = ARCH_KMALLOC_MINALIGN;

	s->name = name;
	s->size = s->object_size = size;

	/*
	 * kmalloc caches guarantee alignment of at least the largest
	 * power-of-two divisor of the size. For power-of-two sizes,
	 * it is the size itself.
	 */
	if (flags & SLAB_KMALLOC)
		align = max(align, 1U << (ffs(size) - 1));
	s->align = calculate_alignment(flags, align, size);

#ifdef CONFIG_HARDENED_USERCOPY
	s->useroffset = useroffset;
	s->usersize = usersize;
#endif

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

static struct kmem_cache *__init create_kmalloc_cache(const char *name,
						      unsigned int size,
						      slab_flags_t flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags | SLAB_KMALLOC, 0, size);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES] __ro_after_init =
{ /* initialization for https://llvm.org/pr42570 */ };
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
unsigned long random_kmalloc_seed __ro_after_init;
EXPORT_SYMBOL(random_kmalloc_seed);
#endif

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
u8 kmalloc_size_index[24] __ro_after_init = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

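/*
 * Worked example (editor's illustration): requests of 1..192 bytes are
 * mapped through this table as kmalloc_size_index[(size - 1) / 8]. A
 * 56-byte request lands in the "56" slot and yields index 6 (kmalloc-64);
 * a 90-byte request lands in the "96" slot and yields index 1
 * (kmalloc-96). With KMALLOC_MIN_SIZE > 8 the table is patched later in
 * this file by setup_kmalloc_cache_index_table().
 */
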
size_t kmalloc_size_roundup(size_t size)
{
	if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
		/*
		 * The flags don't matter since size_index is common to all.
		 * Neither does the caller for just getting ->object_size.
		 */
		return kmalloc_slab(size, NULL, GFP_KERNEL, 0)->object_size;
	}

	/* Above the smaller buckets, size is a multiple of page size. */
	if (size && size <= KMALLOC_MAX_SIZE)
		return PAGE_SIZE << get_order(size);

	/*
	 * Return 'size' for 0 - kmalloc() returns ZERO_SIZE_PTR
	 * and very large size - kmalloc() may fail.
	 */
	return size;
}
EXPORT_SYMBOL(kmalloc_size_roundup);

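/*
 * Worked example (editor's illustration, assuming the default bucket set
 * with KMALLOC_MIN_SIZE == 8 and 4K pages): kmalloc_size_roundup(9)
 * returns 16, kmalloc_size_roundup(100) returns 128, and
 * kmalloc_size_roundup(5000) returns 8192. Requests above
 * KMALLOC_MAX_CACHE_SIZE are rounded up to a power-of-two number of
 * pages via get_order().
 */
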
#ifdef CONFIG_ZONE_DMA
#define KMALLOC_DMA_NAME(sz)	.name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
#else
#define KMALLOC_DMA_NAME(sz)
#endif

#ifdef CONFIG_MEMCG
#define KMALLOC_CGROUP_NAME(sz)	.name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
#else
#define KMALLOC_CGROUP_NAME(sz)
#endif

#ifndef CONFIG_SLUB_TINY
#define KMALLOC_RCL_NAME(sz)	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #sz,
#else
#define KMALLOC_RCL_NAME(sz)
#endif

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define __KMALLOC_RANDOM_CONCAT(a, b) a ## b
#define KMALLOC_RANDOM_NAME(N, sz) __KMALLOC_RANDOM_CONCAT(KMA_RAND_, N)(sz)
#define KMA_RAND_1(sz)			.name[KMALLOC_RANDOM_START +  1] = "kmalloc-rnd-01-" #sz,
#define KMA_RAND_2(sz)	KMA_RAND_1(sz)	.name[KMALLOC_RANDOM_START +  2] = "kmalloc-rnd-02-" #sz,
#define KMA_RAND_3(sz)	KMA_RAND_2(sz)	.name[KMALLOC_RANDOM_START +  3] = "kmalloc-rnd-03-" #sz,
#define KMA_RAND_4(sz)	KMA_RAND_3(sz)	.name[KMALLOC_RANDOM_START +  4] = "kmalloc-rnd-04-" #sz,
#define KMA_RAND_5(sz)	KMA_RAND_4(sz)	.name[KMALLOC_RANDOM_START +  5] = "kmalloc-rnd-05-" #sz,
#define KMA_RAND_6(sz)	KMA_RAND_5(sz)	.name[KMALLOC_RANDOM_START +  6] = "kmalloc-rnd-06-" #sz,
#define KMA_RAND_7(sz)	KMA_RAND_6(sz)	.name[KMALLOC_RANDOM_START +  7] = "kmalloc-rnd-07-" #sz,
#define KMA_RAND_8(sz)	KMA_RAND_7(sz)	.name[KMALLOC_RANDOM_START +  8] = "kmalloc-rnd-08-" #sz,
#define KMA_RAND_9(sz)	KMA_RAND_8(sz)	.name[KMALLOC_RANDOM_START +  9] = "kmalloc-rnd-09-" #sz,
#define KMA_RAND_10(sz)	KMA_RAND_9(sz)	.name[KMALLOC_RANDOM_START + 10] = "kmalloc-rnd-10-" #sz,
#define KMA_RAND_11(sz)	KMA_RAND_10(sz)	.name[KMALLOC_RANDOM_START + 11] = "kmalloc-rnd-11-" #sz,
#define KMA_RAND_12(sz)	KMA_RAND_11(sz)	.name[KMALLOC_RANDOM_START + 12] = "kmalloc-rnd-12-" #sz,
#define KMA_RAND_13(sz)	KMA_RAND_12(sz)	.name[KMALLOC_RANDOM_START + 13] = "kmalloc-rnd-13-" #sz,
#define KMA_RAND_14(sz)	KMA_RAND_13(sz)	.name[KMALLOC_RANDOM_START + 14] = "kmalloc-rnd-14-" #sz,
#define KMA_RAND_15(sz)	KMA_RAND_14(sz)	.name[KMALLOC_RANDOM_START + 15] = "kmalloc-rnd-15-" #sz,
#else // CONFIG_RANDOM_KMALLOC_CACHES
#define KMALLOC_RANDOM_NAME(N, sz)
#endif

#define INIT_KMALLOC_INFO(__size, __short_size)			\
{								\
	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
	KMALLOC_RCL_NAME(__short_size)				\
	KMALLOC_CGROUP_NAME(__short_size)			\
	KMALLOC_DMA_NAME(__short_size)				\
	KMALLOC_RANDOM_NAME(RANDOM_KMALLOC_CACHES_NR, __short_size)	\
	.size = __size,						\
}

/*
 * kmalloc_info[] is to make slab_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^21=2MB, so the final entry of the table is
 * kmalloc-2M.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	INIT_KMALLOC_INFO(0, 0),
	INIT_KMALLOC_INFO(96, 96),
	INIT_KMALLOC_INFO(192, 192),
	INIT_KMALLOC_INFO(8, 8),
	INIT_KMALLOC_INFO(16, 16),
	INIT_KMALLOC_INFO(32, 32),
	INIT_KMALLOC_INFO(64, 64),
	INIT_KMALLOC_INFO(128, 128),
	INIT_KMALLOC_INFO(256, 256),
	INIT_KMALLOC_INFO(512, 512),
	INIT_KMALLOC_INFO(1024, 1k),
	INIT_KMALLOC_INFO(2048, 2k),
	INIT_KMALLOC_INFO(4096, 4k),
	INIT_KMALLOC_INFO(8192, 8k),
	INIT_KMALLOC_INFO(16384, 16k),
	INIT_KMALLOC_INFO(32768, 32k),
	INIT_KMALLOC_INFO(65536, 64k),
	INIT_KMALLOC_INFO(131072, 128k),
	INIT_KMALLOC_INFO(262144, 256k),
	INIT_KMALLOC_INFO(524288, 512k),
	INIT_KMALLOC_INFO(1048576, 1M),
	INIT_KMALLOC_INFO(2097152, 2M)
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	unsigned int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		!is_power_of_2(KMALLOC_MIN_SIZE));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		unsigned int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(kmalloc_size_index))
			break;
		kmalloc_size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte sized cache is not used if the alignment
		 * is 64 byte.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			kmalloc_size_index[size_index_elem(i)] = 7;
	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			kmalloc_size_index[size_index_elem(i)] = 8;
	}
}

static unsigned int __kmalloc_minalign(void)
{
	unsigned int minalign = dma_get_cache_alignment();

	if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
	    is_swiotlb_allocated())
		minalign = ARCH_KMALLOC_MINALIGN;

	return max(minalign, arch_slab_minalign());
}

static void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type)
{
	slab_flags_t flags = 0;
	unsigned int minalign = __kmalloc_minalign();
	unsigned int aligned_size = kmalloc_info[idx].size;
	int aligned_idx = idx;

	if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) {
		flags |= SLAB_RECLAIM_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_MEMCG) && (type == KMALLOC_CGROUP)) {
		if (mem_cgroup_kmem_disabled()) {
			kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
			return;
		}
		flags |= SLAB_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_ZONE_DMA) && (type == KMALLOC_DMA)) {
		flags |= SLAB_CACHE_DMA;
	}

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
	if (type >= KMALLOC_RANDOM_START && type <= KMALLOC_RANDOM_END)
		flags |= SLAB_NO_MERGE;
#endif

	/*
	 * If CONFIG_MEMCG is enabled, disable cache merging for
	 * KMALLOC_NORMAL caches.
	 */
	if (IS_ENABLED(CONFIG_MEMCG) && (type == KMALLOC_NORMAL))
		flags |= SLAB_NO_MERGE;

	if (minalign > ARCH_KMALLOC_MINALIGN) {
		aligned_size = ALIGN(aligned_size, minalign);
		aligned_idx = __kmalloc_index(aligned_size, false);
	}

	if (!kmalloc_caches[type][aligned_idx])
		kmalloc_caches[type][aligned_idx] = create_kmalloc_cache(
					kmalloc_info[aligned_idx].name[type],
					aligned_size, flags);
	if (idx != aligned_idx)
		kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx];
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(void)
{
	int i;
	enum kmalloc_cache_type type;

	/*
	 * Including KMALLOC_CGROUP if CONFIG_MEMCG defined
	 */
	for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) {
		/* Caches that are NOT of the two-to-the-power-of size. */
		if (KMALLOC_MIN_SIZE <= 32)
			new_kmalloc_cache(1, type);
		if (KMALLOC_MIN_SIZE <= 64)
			new_kmalloc_cache(2, type);

		/* Caches that are of the two-to-the-power-of size. */
		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
			new_kmalloc_cache(i, type);
	}
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
	random_kmalloc_seed = get_random_u64();
#endif

	/* Kmalloc array is now usable */
	slab_state = UP;

	if (IS_ENABLED(CONFIG_SLAB_BUCKETS))
		kmem_buckets_cache = kmem_cache_create("kmalloc_buckets",
						       sizeof(kmem_buckets),
						       0, SLAB_NO_MERGE, NULL);
}

/**
 * __ksize -- Report full size of underlying allocation
 * @object: pointer to the object
 *
 * This should only be used internally to query the true size of allocations.
 * It is not meant to be a way to discover the usable size of an allocation
 * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
 * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
 * and/or FORTIFY_SOURCE.
 *
 * Return: size of the actual memory used by @object in bytes
 */
size_t __ksize(const void *object)
{
	struct folio *folio;

	if (unlikely(object == ZERO_SIZE_PTR))
		return 0;

	folio = virt_to_folio(object);

	if (unlikely(!folio_test_slab(folio))) {
		if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
			return 0;
		if (WARN_ON(object != folio_address(folio)))
			return 0;
		return folio_size(folio);
	}

#ifdef CONFIG_SLUB_DEBUG
	skip_orig_size_check(folio_slab(folio)->slab_cache, object);
#endif

	return slab_ksize(folio_slab(folio)->slab_cache);
}

gfp_t kmalloc_fix_flags(gfp_t flags)
{
	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

	flags &= ~GFP_SLAB_BUG_MASK;
	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
			invalid_mask, &invalid_mask, flags, &flags);
	dump_stack();

	return flags;
}

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(unsigned int *list,
			       unsigned int count)
{
	unsigned int rand;
	unsigned int i;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = get_random_u32_below(i + 1);
		swap(list[i], list[rand]);
	}
}

/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
				    gfp_t gfp)
{
	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	freelist_randomize(cachep->random_seq, count);
	return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#ifdef CONFIG_SLUB_DEBUG
#define SLABINFO_RIGHTS (0400)

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}

static void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}

void dump_unreclaimable_slab(void)
{
	struct kmem_cache *s;
	struct slabinfo sinfo;

	/*
	 * Acquiring slab_mutex here is risky since we don't want to sleep
	 * in the OOM path. But walking the list without the mutex held
	 * risks a crash.
	 * Use mutex_trylock to protect the list traversal and dump nothing
	 * if the mutex cannot be acquired.
	 */
	if (!mutex_trylock(&slab_mutex)) {
		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
		return;
	}

	pr_info("Unreclaimable slab info:\n");
	pr_info("Name Used Total\n");

	list_for_each_entry(s, &slab_caches, list) {
		if (s->flags & SLAB_RECLAIM_ACCOUNT)
			continue;

		get_slabinfo(s, &sinfo);

		if (sinfo.num_objs > 0)
			pr_info("%-17s %10luKB %10luKB\n", s->name,
				(sinfo.active_objs * s->size) / 1024,
				(sinfo.num_objs * s->size) / 1024);
	}
	mutex_unlock(&slab_mutex);
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct proc_ops slabinfo_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= slabinfo_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
	return 0;
}
module_init(slab_proc_init);

#endif /* CONFIG_SLUB_DEBUG */

static __always_inline __realloc_size(2) void *
__do_krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;
	size_t ks;

	/* Check for double-free before calling ksize. */
	if (likely(!ZERO_OR_NULL_PTR(p))) {
		if (!kasan_check_byte(p))
			return NULL;
		ks = ksize(p);
	} else
		ks = 0;

	/* If the object still fits, repoison it precisely. */
	if (ks >= new_size) {
		p = kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
	if (ret && p) {
		/* Disable KASAN checks as the object's redzone is accessed. */
		kasan_disable_current();
		memcpy(ret, kasan_reset_tag(p), ks);
		kasan_enable_current();
	}

	return ret;
}

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
 * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
 * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc_noprof);

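/*
 * Illustrative usage sketch (editor's addition; hypothetical names): grow
 * a buffer while keeping its contents. On success the old pointer may
 * already have been freed; on failure the old buffer is left untouched:
 *
 *	new = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;	// buf is still valid and still owned here
 *	buf = new;
 */
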
/**
 * kfree_sensitive - Clear sensitive information in memory before freeing
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before being freed.
 * If @p is %NULL, kfree_sensitive() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kfree_sensitive(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	ks = ksize(mem);
	if (ks) {
		kasan_unpoison_range(mem, ks);
		memzero_explicit(mem, ks);
	}
	kfree(mem);
}
EXPORT_SYMBOL(kfree_sensitive);

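/*
 * Illustrative usage sketch (editor's addition; hypothetical names): key
 * material is wiped before the backing memory returns to the allocator:
 *
 *	key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kfree_sensitive(key);	// zeroes the whole allocation, then frees
 */
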
size_t ksize(const void *objp)
{
	/*
	 * We need to first check that the pointer to the object is valid.
	 * The KASAN report printed from ksize() is more useful than when
	 * it's printed later, when the behaviour could be undefined due to
	 * a potential use-after-free or double-free.
	 *
	 * We use kasan_check_byte(), which is supported for the hardware
	 * tag-based KASAN mode, unlike kasan_check_read/write().
	 *
	 * If the pointed to memory is invalid, we return 0 to avoid users of
	 * ksize() writing to and potentially corrupting the memory region.
	 *
	 * We want to perform the check before __ksize(), to avoid potentially
	 * crashing in __ksize() due to accessing invalid metadata.
	 */
	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
		return 0;

	return kfence_ksize(objp) ?: __ksize(objp);
}
EXPORT_SYMBOL(ksize);

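/*
 * Illustrative note (editor's addition): ksize() reports the usable size
 * of the whole allocation, which may exceed what was requested; a
 * kmalloc(100, GFP_KERNEL) object typically reports 128 here. Writing
 * into that slack is discouraged; callers that actually want the extra
 * space should size the request with kmalloc_size_roundup() up front.
 */
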
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);