/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
        unsigned int object_size;/* The original size of the object */
        unsigned int size;      /* The aligned/padded/added-on size */
        unsigned int align;     /* Alignment as calculated */
        slab_flags_t flags;     /* Active flags on the slab */
        unsigned int useroffset;/* Usercopy region offset */
        unsigned int usersize;  /* Usercopy region size */
        const char *name;       /* Slab name for sysfs */
        int refcount;           /* Use counter */
        void (*ctor)(void *);   /* Called on object slot creation */
        struct list_head list;  /* List of all slab caches on the system */
};

#else /* !CONFIG_SLOB */

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child cache will have it. Some fields are used
 * in both cases, others are specific to root caches.
 *
 * @root_cache:	Common to root and child caches. NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_cache: pointer to the memcg kmem cache, used by all non-root memory
 *		cgroups.
 * @root_caches_node: list node for the slab_root_caches list.
 */
struct memcg_cache_params {
        struct kmem_cache *root_cache;

        struct kmem_cache *memcg_cache;
        struct list_head __root_caches_node;
};
#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,                   /* No slab functionality yet */
        PARTIAL,                /* SLUB: kmem_cache_node available */
        PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
        UP,                     /* Slab caches usable but not all extras yet */
        FULL                    /* Everything is working */
};

extern enum slab_state slab_state;
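
/*
 * Illustrative sketch (not part of this header): code that may run before
 * the allocator is fully up can gate on slab_state; slab_is_available()
 * wraps exactly the "slab_state >= UP" check, so early-boot callers can do
 * things like:
 *
 *	if (slab_is_available())
 *		p = kmalloc(size, GFP_KERNEL);
 *	else
 *		p = memblock_alloc(size, SMP_CACHE_BYTES);
 */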

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
        const char *name[NR_KMALLOC_TYPES];
        unsigned int size;
} kmalloc_info[];
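
/*
 * Usage sketch (illustrative; assumes a size that maps to a valid cache
 * index): the kmalloc setup code pulls both the per-type name and the
 * object size from this table, along the lines of:
 *
 *	unsigned int idx = kmalloc_index(size);
 *	const char *name = kmalloc_info[idx].name[KMALLOC_NORMAL];
 *	unsigned int objsize = kmalloc_info[idx].size;
 */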

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
                        slab_flags_t flags, unsigned int useroffset,
                        unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
                        unsigned int size, slab_flags_t flags,
                        unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
                slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name,
        void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name,
        void (*ctor)(void *))
{
        return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
                         SLAB_CACHE_DMA32 | SLAB_PANIC | \
                         SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
                          SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
                          SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with the current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create() */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
                              SLAB_RED_ZONE | \
                              SLAB_POISON | \
                              SLAB_STORE_USER | \
                              SLAB_TRACE | \
                              SLAB_CONSISTENCY_CHECKS | \
                              SLAB_MEM_SPREAD | \
                              SLAB_NOLEAKTRACE | \
                              SLAB_RECLAIM_ACCOUNT | \
                              SLAB_TEMPORARY | \
                              SLAB_ACCOUNT)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);
void kmem_cache_shrink_all(struct kmem_cache *s);

struct seq_file;
struct file;

struct slabinfo {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs;
        unsigned long num_slabs;
        unsigned long shared_avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int shared;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

static inline int cache_vmstat_idx(struct kmem_cache *s)
{
        return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}
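
/*
 * For example, a cache created with SLAB_RECLAIM_ACCOUNT (the dentry cache
 * is a classic case) is accounted under NR_SLAB_RECLAIMABLE_B, letting
 * vmstat report reclaimable and unreclaimable slab memory separately, in
 * bytes.
 */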

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
        VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
        if (static_branch_unlikely(&slub_debug_enabled))
                return s->flags & flags;
#endif
        return false;
}
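
/*
 * Typical use: gate a debug-only slow path on a single parsed flag, as
 * cache_from_obj() does later in this file:
 *
 *	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
 *		...verify the object really belongs to s...
 */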

#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

static inline bool is_root_cache(struct kmem_cache *s)
{
        return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
                                      struct kmem_cache *p)
{
        return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
        if (!is_root_cache(s))
                s = s->memcg_params.root_cache;
        return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
        if (is_root_cache(s))
                return s;
        return s->memcg_params.root_cache;
}

static inline struct kmem_cache *memcg_cache(struct kmem_cache *s)
{
        if (is_root_cache(s))
                return s->memcg_params.memcg_cache;
        return NULL;
}

static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
{
        /*
         * page->mem_cgroup and page->obj_cgroups are sharing the same
         * space. To distinguish between them in case we don't know for sure
         * that the page is a slab page (e.g. page_cgroup_ino()), let's
         * always set the lowest bit of obj_cgroups.
         */
        return (struct obj_cgroup **)
                ((unsigned long)page->obj_cgroups & ~0x1UL);
}

static inline bool page_has_obj_cgroups(struct page *page)
{
        return ((unsigned long)page->obj_cgroups & 0x1UL);
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
                                               struct kmem_cache *s, gfp_t gfp)
{
        unsigned int objects = objs_per_slab_page(s, page);
        void *vec;

        vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
                           page_to_nid(page));
        if (!vec)
                return -ENOMEM;

        kmemleak_not_leak(vec);
        page->obj_cgroups = (struct obj_cgroup **) ((unsigned long)vec | 0x1UL);
        return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
        kfree(page_obj_cgroups(page));
        page->obj_cgroups = NULL;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
        /*
         * For each accounted object there is an extra space which is used
         * to store obj_cgroup membership. Charge it too.
         */
        return s->size + sizeof(struct obj_cgroup *);
}
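
/*
 * Worked example: on a 64-bit kernel a cache with s->size == 64 charges
 * 64 + sizeof(struct obj_cgroup *) == 72 bytes per accounted object; the
 * extra 8 bytes pay for the obj_cgroup pointer slot in the page's
 * obj_cgroups vector.
 */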

static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                                struct obj_cgroup **objcgp,
                                                size_t objects, gfp_t flags)
{
        struct kmem_cache *cachep;
        struct obj_cgroup *objcg;

        if (memcg_kmem_bypass())
                return s;

        cachep = memcg_kmem_get_cache(s);
        if (is_root_cache(cachep))
                return s;

        objcg = get_obj_cgroup_from_current();
        if (!objcg)
                return s;

        if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
                obj_cgroup_put(objcg);
                cachep = NULL;
        }

        *objcgp = objcg;
        return cachep;
}

static inline void mod_objcg_state(struct obj_cgroup *objcg,
                                   struct pglist_data *pgdat,
                                   int idx, int nr)
{
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;

        rcu_read_lock();
        memcg = obj_cgroup_memcg(objcg);
        lruvec = mem_cgroup_lruvec(memcg, pgdat);
        mod_memcg_lruvec_state(lruvec, idx, nr);
        rcu_read_unlock();
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                                              struct obj_cgroup *objcg,
                                              size_t size, void **p)
{
        struct page *page;
        unsigned long off;
        size_t i;

        for (i = 0; i < size; i++) {
                if (likely(p[i])) {
                        page = virt_to_head_page(p[i]);
                        off = obj_to_index(s, page, p[i]);
                        obj_cgroup_get(objcg);
                        page_obj_cgroups(page)[off] = objcg;
                        mod_objcg_state(objcg, page_pgdat(page),
                                        cache_vmstat_idx(s), obj_full_size(s));
                } else {
                        obj_cgroup_uncharge(objcg, obj_full_size(s));
                }
        }
        obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
                                        void *p)
{
        struct obj_cgroup *objcg;
        unsigned int off;

        if (!memcg_kmem_enabled() || is_root_cache(s))
                return;

        off = obj_to_index(s, page, p);
        objcg = page_obj_cgroups(page)[off];
        page_obj_cgroups(page)[off] = NULL;

        obj_cgroup_uncharge(objcg, obj_full_size(s));
        mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
                        -obj_full_size(s));

        obj_cgroup_put(objcg);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

static inline bool is_root_cache(struct kmem_cache *s)
{
        return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
                                      struct kmem_cache *p)
{
        return s == p;
}

static inline const char *cache_name(struct kmem_cache *s)
{
        return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
        return s;
}

static inline struct kmem_cache *memcg_cache(struct kmem_cache *s)
{
        return NULL;
}

static inline bool page_has_obj_cgroups(struct page *page)
{
        return false;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
        return NULL;
}

static inline int memcg_alloc_page_obj_cgroups(struct page *page,
                                               struct kmem_cache *s, gfp_t gfp)
{
        return 0;
}

static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}

static inline struct kmem_cache *memcg_slab_pre_alloc_hook(struct kmem_cache *s,
                                                           struct obj_cgroup **objcgp,
                                                           size_t objects, gfp_t flags)
{
        return NULL;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
                                              struct obj_cgroup *objcg,
                                              size_t size, void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
                                        void *p)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
        struct page *page;

        page = virt_to_head_page(obj);
        if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
                                        __func__))
                return NULL;
        return page->slab_cache;
}
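
/*
 * Usage sketch: any pointer into a slab allocation can be mapped back to
 * its owning cache, as kfree()-style paths do:
 *
 *	struct kmem_cache *s = virt_to_cache(objp);
 *
 *	if (s)
 *		...release objp back to s...
 *
 * A NULL return (after a one-time warning) means the backing page is not
 * a slab page at all.
 */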

static __always_inline int charge_slab_page(struct page *page,
                                            gfp_t gfp, int order,
                                            struct kmem_cache *s)
{
        if (memcg_kmem_enabled() && !is_root_cache(s)) {
                int ret;

                ret = memcg_alloc_page_obj_cgroups(page, s, gfp);
                if (ret)
                        return ret;
        }

        mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                            PAGE_SIZE << order);
        return 0;
}

static __always_inline void uncharge_slab_page(struct page *page, int order,
                                               struct kmem_cache *s)
{
        if (memcg_kmem_enabled() && !is_root_cache(s))
                memcg_free_page_obj_cgroups(page);

        mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                            -(PAGE_SIZE << order));
}
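
/*
 * The two helpers above are paired around slab page lifetime: the
 * allocators charge a page right after acquiring it and uncharge it just
 * before handing it back, so the per-node NR_SLAB_*_B counters and the
 * obj_cgroups vector stay consistent with the set of live slab pages.
 */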

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
        struct kmem_cache *cachep;

        if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
            !memcg_kmem_enabled() &&
            !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
                return s;

        cachep = virt_to_cache(x);
        if (WARN(cachep && !slab_equal_or_root(cachep, s),
                  "%s: Wrong slab cache. %s but object is from %s\n",
                  __func__, s->name, cachep->name))
                print_tracking(cachep, x);
        return cachep;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
        return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
        /*
         * Debugging requires use of the padding between object
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->object_size;
# endif
        if (s->flags & SLAB_KASAN)
                return s->object_size;
        /*
         * If we need to store the freelist pointer back there or track
         * user information, then we can only use the space before that
         * information.
         */
        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
        /*
         * Else we can use all the padding etc. for the allocation.
         */
        return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
                                                     struct obj_cgroup **objcgp,
                                                     size_t size, gfp_t flags)
{
        flags &= gfp_allowed_mask;

        fs_reclaim_acquire(flags);
        fs_reclaim_release(flags);

        might_sleep_if(gfpflags_allow_blocking(flags));

        if (should_failslab(s, flags))
                return NULL;

        if (memcg_kmem_enabled() &&
            ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
                return memcg_slab_pre_alloc_hook(s, objcgp, size, flags);

        return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
                                        struct obj_cgroup *objcg,
                                        gfp_t flags, size_t size, void **p)
{
        size_t i;

        flags &= gfp_allowed_mask;
        for (i = 0; i < size; i++) {
                p[i] = kasan_slab_alloc(s, p[i], flags);
                /* As p[i] might get tagged, call kmemleak hook after KASAN. */
                kmemleak_alloc_recursive(p[i], s->object_size, 1,
                                         s->flags, flags);
        }

        if (memcg_kmem_enabled() && !is_root_cache(s))
                memcg_slab_post_alloc_hook(s, objcg, size, p);
}
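
/*
 * The two hooks above bracket every allocation fast path. A simplified
 * sketch of the calling convention in the allocators:
 *
 *	struct obj_cgroup *objcg = NULL;
 *
 *	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
 *	if (!s)
 *		return NULL;
 *	...allocate the object into p[0]...
 *	slab_post_alloc_hook(s, objcg, gfpflags, 1, p);
 */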

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
        spinlock_t list_lock;

#ifdef CONFIG_SLAB
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long total_slabs;      /* length of all slab lists */
        unsigned long free_slabs;       /* length of free slab list only */
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        struct array_cache *shared;     /* shared per node */
        struct alien_cache **alien;     /* on other nodes */
        unsigned long next_reap;        /* updated without locking */
        int free_touched;               /* updated without locking */
#endif

#ifdef CONFIG_SLUB
        unsigned long nr_partial;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
        return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
        for (__node = 0; __node < nr_node_ids; __node++) \
                 if ((__n = get_node(__s, __node)))
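
/*
 * Usage sketch (SLUB fields shown; mirrors how the shrink and statistics
 * code walks nodes):
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;
 */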

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
                        gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
                                        unsigned int count, gfp_t gfp)
{
        return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
        if (static_branch_unlikely(&init_on_alloc)) {
                if (c->ctor)
                        return false;
                if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
                        return flags & __GFP_ZERO;
                return true;
        }
        return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
        if (static_branch_unlikely(&init_on_free))
                return !(c->ctor ||
                         (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
        return false;
}
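
/*
 * Resulting policy, by example: with init_on_alloc enabled, a plain
 * kmalloc-style cache (no ctor, no SLAB_TYPESAFE_BY_RCU or SLAB_POISON)
 * is always zeroed on allocation; a cache with a constructor is never
 * auto-zeroed, since zeroing would wipe what the ctor set up; an
 * RCU-typesafe or poisoned cache is zeroed only if the caller passed
 * __GFP_ZERO explicitly.
 */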

#endif /* MM_SLAB_H */