/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/* Reuses the bits in struct page */
struct slab {
        unsigned long __page_flags;

#if defined(CONFIG_SLAB)

        struct kmem_cache *slab_cache;
        union {
                struct {
                        struct list_head slab_list;
                        void *freelist; /* array of free object indexes */
                        void *s_mem;    /* first object */
                };
                struct rcu_head rcu_head;
        };
        unsigned int active;

#elif defined(CONFIG_SLUB)

        struct kmem_cache *slab_cache;
        union {
                struct {
                        union {
                                struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
                                struct {
                                        struct slab *next;
                                        int slabs;      /* Nr of slabs left */
                                };
#endif
                        };
                        /* Double-word boundary */
                        void *freelist;         /* first free object */
                        union {
                                unsigned long counters;
                                struct {
                                        unsigned inuse:16;
                                        unsigned objects:15;
                                        unsigned frozen:1;
                                };
                        };
                };
                struct rcu_head rcu_head;
        };
        unsigned int __unused;

#elif defined(CONFIG_SLOB)

        struct list_head slab_list;
        void *__unused_1;
        void *freelist;         /* first free block */
        long units;
        unsigned int __unused_2;

#else
#error "Unexpected slab allocator configured"
#endif

        atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
        unsigned long memcg_data;
#endif
};

#define SLAB_MATCH(pg, sl) \
        static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
#ifndef CONFIG_SLOB
SLAB_MATCH(compound_head, slab_cache);  /* Ensure bit 0 is clear */
#else
SLAB_MATCH(compound_head, slab_list);   /* Ensure bit 0 is clear */
#endif
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && defined(CONFIG_SLUB)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), 2*sizeof(void *)));
#endif
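
/*
 * Illustrative sketch (not part of the original header): for one field,
 * SLAB_MATCH(flags, __page_flags) expands to the assertion below, which is
 * what guarantees that a struct slab overlaid on a struct page keeps each
 * reused field at the identical byte offset:
 *
 *      static_assert(offsetof(struct page, flags) ==
 *                    offsetof(struct slab, __page_flags));
 */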

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio) (_Generic((folio), \
        const struct folio *: (const struct slab *)(folio), \
        struct folio *: (struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and are
 * using some fields in the first struct page of the folio - those fields are
 * now accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s) (_Generic((s), \
        const struct slab *: (const struct folio *)s, \
        struct slab *: (struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head, or single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p) (_Generic((p), \
        const struct page *: (const struct slab *)(p), \
        struct page *: (struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
        return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
        folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
        folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
        __folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
        return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
        return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
        return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
        struct folio *folio = virt_to_folio(addr);

        if (!folio_test_slab(folio))
                return NULL;

        return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
        return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
        return PAGE_SIZE << slab_order(slab);
}

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in its definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
        unsigned int object_size;       /* The original size of the object */
        unsigned int size;              /* The aligned/padded/added-on size */
        unsigned int align;             /* Alignment as calculated */
        slab_flags_t flags;             /* Active flags on the slab */
        const char *name;               /* Slab name for sysfs */
        int refcount;                   /* Use counter */
        void (*ctor)(void *);           /* Called on object slot creation */
        struct list_head list;          /* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,                   /* No slab functionality yet */
        PARTIAL,                /* SLUB: kmem_cache_node available */
        PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
        UP,                     /* Slab caches usable but not all extras yet */
        FULL                    /* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
        const char *name[NR_KMALLOC_TYPES];
        unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);

void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
                              int node, size_t orig_size,
                              unsigned long caller);
void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
                        slab_flags_t flags, unsigned int useroffset,
                        unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
                        unsigned int size, slab_flags_t flags,
                        unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
                slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name);
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name)
{
        return flags;
}
#endif

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
#ifndef CONFIG_SLOB
        return (s->flags & SLAB_KMALLOC);
#else
        return false;
#endif
}

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
                         SLAB_CACHE_DMA32 | SLAB_PANIC | \
                         SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
                          SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
                          SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_ACCOUNT | \
                          SLAB_NO_USER_FLAGS | SLAB_KMALLOC)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
                              SLAB_RED_ZONE | \
                              SLAB_POISON | \
                              SLAB_STORE_USER | \
                              SLAB_TRACE | \
                              SLAB_CONSISTENCY_CHECKS | \
                              SLAB_MEM_SPREAD | \
                              SLAB_NOLEAKTRACE | \
                              SLAB_RECLAIM_ACCOUNT | \
                              SLAB_TEMPORARY | \
                              SLAB_ACCOUNT | \
                              SLAB_KMALLOC | \
                              SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs;
        unsigned long num_slabs;
        unsigned long shared_avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int shared;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos);

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
        return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
        return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
        return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
        if (IS_ENABLED(CONFIG_SLUB_DEBUG))
                VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
        if (__slub_debug_enabled())
                return s->flags & flags;
        return false;
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
        unsigned long memcg_data = READ_ONCE(slab->memcg_data);

        VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
                       slab_page(slab));
        VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

        return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
                             gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
                     enum node_stat_item idx, int nr);

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
        kfree(slab_objcgs(slab));
        slab->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
        /*
         * For each accounted object there is an extra space which is used
         * to store obj_cgroup membership. Charge it too.
         */
        return s->size + sizeof(struct obj_cgroup *);
}

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                             struct list_lru *lru,
                                             struct obj_cgroup **objcgp,
                                             size_t objects, gfp_t flags)
{
        struct obj_cgroup *objcg;

        if (!memcg_kmem_enabled())
                return true;

        if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
                return true;

        objcg = get_obj_cgroup_from_current();
        if (!objcg)
                return true;

        if (lru) {
                int ret;
                struct mem_cgroup *memcg;

                memcg = get_mem_cgroup_from_objcg(objcg);
                ret = memcg_list_lru_alloc(memcg, lru, flags);
                css_put(&memcg->css);

                if (ret)
                        goto out;
        }

        if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
                goto out;

        *objcgp = objcg;
        return true;
out:
        obj_cgroup_put(objcg);
        return false;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                                              struct obj_cgroup *objcg,
                                              gfp_t flags, size_t size,
                                              void **p)
{
        struct slab *slab;
        unsigned long off;
        size_t i;

        if (!memcg_kmem_enabled() || !objcg)
                return;

        for (i = 0; i < size; i++) {
                if (likely(p[i])) {
                        slab = virt_to_slab(p[i]);

                        if (!slab_objcgs(slab) &&
                            memcg_alloc_slab_cgroups(slab, s, flags, false)) {
                                obj_cgroup_uncharge(objcg, obj_full_size(s));
                                continue;
                        }

                        off = obj_to_index(s, slab, p[i]);
                        obj_cgroup_get(objcg);
                        slab_objcgs(slab)[off] = objcg;
                        mod_objcg_state(objcg, slab_pgdat(slab),
                                        cache_vmstat_idx(s), obj_full_size(s));
                } else {
                        obj_cgroup_uncharge(objcg, obj_full_size(s));
                }
        }
        obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
                                        void **p, int objects)
{
        struct obj_cgroup **objcgs;
        int i;

        if (!memcg_kmem_enabled())
                return;

        objcgs = slab_objcgs(slab);
        if (!objcgs)
                return;

        for (i = 0; i < objects; i++) {
                struct obj_cgroup *objcg;
                unsigned int off;

                off = obj_to_index(s, slab, p[i]);
                objcg = objcgs[off];
                if (!objcg)
                        continue;

                objcgs[off] = NULL;
                obj_cgroup_uncharge(objcg, obj_full_size(s));
                mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
                                -obj_full_size(s));
                obj_cgroup_put(objcg);
        }
}

#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
        return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
        return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
                                           struct kmem_cache *s, gfp_t gfp,
                                           bool new_slab)
{
        return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                             struct list_lru *lru,
                                             struct obj_cgroup **objcgp,
                                             size_t objects, gfp_t flags)
{
        return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                                              struct obj_cgroup *objcg,
                                              gfp_t flags, size_t size,
                                              void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
                                        void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifndef CONFIG_SLOB
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
        struct slab *slab;

        slab = virt_to_slab(obj);
        if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
                      __func__))
                return NULL;
        return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
                                         struct kmem_cache *s, gfp_t gfp)
{
        if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
                memcg_alloc_slab_cgroups(slab, s, gfp, true);

        mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
                            PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
                                           struct kmem_cache *s)
{
        if (memcg_kmem_enabled())
                memcg_free_slab_cgroups(slab);

        mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
                            -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
        struct kmem_cache *cachep;

        if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
            !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
                return s;

        cachep = virt_to_cache(x);
        if (WARN(cachep && cachep != s,
                 "%s: Wrong slab cache. %s but object is from %s\n",
                 __func__, s->name, cachep->name))
                print_tracking(cachep, x);
        return cachep;
}

void free_large_kmalloc(struct folio *folio, void *object);

#endif /* CONFIG_SLOB */

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
        return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
        /*
         * Debugging requires use of the padding between object
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->object_size;
# endif
        if (s->flags & SLAB_KASAN)
                return s->object_size;
        /*
         * If we have the need to store the freelist pointer
         * back there or track user information then we can
         * only use the space before that information.
         */
        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
        /*
         * Else we can use all the padding etc for the allocation
         */
        return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
                                                     struct list_lru *lru,
                                                     struct obj_cgroup **objcgp,
                                                     size_t size, gfp_t flags)
{
        flags &= gfp_allowed_mask;

        might_alloc(flags);

        if (should_failslab(s, flags))
                return NULL;

        if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
                return NULL;

        return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
                                        struct obj_cgroup *objcg, gfp_t flags,
                                        size_t size, void **p, bool init,
                                        unsigned int orig_size)
{
        unsigned int zero_size = s->object_size;
        size_t i;

        flags &= gfp_allowed_mask;

        /*
         * For kmalloc objects, the allocated memory size (object_size) is
         * likely larger than the requested size (orig_size). If redzone
         * checking is enabled for the extra space, don't zero it, as it will
         * be redzoned soon. The redzone operation for this extra space could
         * be seen as a replacement of the current poisoning under certain
         * debug options, and won't break other sanity checks.
         */
        if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
            (s->flags & SLAB_KMALLOC))
                zero_size = orig_size;

        /*
         * As memory initialization might be integrated into KASAN,
         * kasan_slab_alloc and initialization memset must be
         * kept together to avoid discrepancies in behavior.
         *
         * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
         */
        for (i = 0; i < size; i++) {
                p[i] = kasan_slab_alloc(s, p[i], flags, init);
                if (p[i] && init && !kasan_has_integrated_init())
                        memset(p[i], 0, zero_size);
                kmemleak_alloc_recursive(p[i], s->object_size, 1,
                                         s->flags, flags);
                kmsan_slab_alloc(s, p[i], flags);
        }

        memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
#ifdef CONFIG_SLAB
        raw_spinlock_t list_lock;
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long total_slabs;      /* length of all slab lists */
        unsigned long free_slabs;       /* length of free slab list only */
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        struct array_cache *shared;     /* shared per node */
        struct alien_cache **alien;     /* on other nodes */
        unsigned long next_reap;        /* updated without locking */
        int free_touched;               /* updated without locking */
#endif

#ifdef CONFIG_SLUB
        spinlock_t list_lock;
        unsigned long nr_partial;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;
#endif
#endif
};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
        return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
        for (__node = 0; __node < nr_node_ids; __node++) \
                if ((__n = get_node(__s, __node)))

#endif

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
                            gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
                                          unsigned int count, gfp_t gfp)
{
        return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
        if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
                                &init_on_alloc)) {
                if (c->ctor)
                        return false;
                if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
                        return flags & __GFP_ZERO;
                return true;
        }
        return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
        if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
                                &init_on_free))
                return !(c->ctor ||
                         (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
        return false;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
        void *kp_ptr;
        struct slab *kp_slab;
        void *kp_objp;
        unsigned long kp_data_offset;
        struct kmem_cache *kp_slab_cache;
        void *kp_ret;
        void *kp_stack[KS_ADDRS_COUNT];
        void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n,
                         const struct slab *slab, bool to_user);
#else
static inline
void __check_heap_object(const void *ptr, unsigned long n,
                         const struct slab *slab, bool to_user)
{
}
#endif

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */