#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structures of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added-on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

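/*
 * Illustrative example (not part of this header): early-boot callers
 * typically gate slab usage on slab_state, e.g.
 *
 *	if (slab_state >= UP)
 *		buf = kmalloc(len, GFP_KERNEL);
 *
 * The common code wraps a check like this as slab_is_available().
 */
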
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

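/*
 * Illustrative sketch: every cache is linked into slab_caches via its
 * "list" member, and all traversals must hold slab_mutex, e.g.
 *
 *	struct kmem_cache *s;
 *
 *	mutex_lock(&slab_mutex);
 *	list_for_each_entry(s, &slab_caches, list)
 *		pr_info("registered cache: %s\n", s->name);
 *	mutex_unlock(&slab_mutex);
 */
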
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

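/*
 * Illustrative example: resolving the kmalloc cache that would back an
 * allocation of a given size (ZERO_SIZE_PTR/error handling elided):
 *
 *	struct kmem_cache *s = kmalloc_slab(128, GFP_KERNEL);
 *
 *	if (s)
 *		pr_info("128-byte kmalloc objects come from %s\n", s->name);
 */
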
/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

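/*
 * Illustrative sketch (the real sequence lives in each allocator's
 * kmem_cache_init()): the statically allocated boot instance of the
 * "cache of caches" is set up with create_boot_cache() before any
 * dynamic caches can exist, roughly:
 *
 *	create_boot_cache(kmem_cache, "kmem_cache",
 *			  sizeof(struct kmem_cache), SLAB_HWCACHE_ALIGN);
 */
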
int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif

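/*
 * Illustrative sketch of how the merging hooks are used (simplified from
 * kmem_cache_create()): before a new cache is built, the allocator is
 * asked whether an existing, compatible cache can be aliased instead:
 *
 *	s = __kmem_cache_alias(name, size, align, flags, ctor);
 *	if (s)
 *		return s;
 *
 * Only if no mergeable cache exists is a fresh kmem_cache created.
 */
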
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

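/*
 * Illustrative example: cache creation restricts caller-supplied flags to
 * this mask, so a flag that is unsupported in the current configuration
 * is simply dropped:
 *
 *	flags &= CACHE_CREATE_MASK;
 */
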
int __kmem_cache_shutdown(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

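/*
 * Illustrative sketch (simplified from the /proc/slabinfo code): each
 * allocator fills in a struct slabinfo, which the common code then
 * formats for seq_file output:
 *
 *	struct slabinfo sinfo;
 *
 *	memset(&sinfo, 0, sizeof(sinfo));
 *	get_slabinfo(s, &sinfo);
 *	seq_printf(m, "%-17s %6lu %6lu\n", cache_name(s),
 *		   sinfo.active_objs, sinfo.num_objs);
 */
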
#ifdef CONFIG_MEMCG_KMEM
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.list, \
			    memcg_params.list)

#define for_each_memcg_cache_safe(iter, tmp, root) \
	list_for_each_entry_safe(iter, tmp, &(root)->memcg_params.list, \
				 memcg_params.list)

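/*
 * Illustrative example: walking the per-memcg children of a root cache
 * ("root_cache" stands for any root kmem_cache). slab_mutex must be held
 * across the walk:
 *
 *	struct kmem_cache *c;
 *
 *	lockdep_assert_held(&slab_mutex);
 *	for_each_memcg_cache(c, root_cache)
 *		pr_info("memcg child cache: %s\n", c->name);
 */
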
static inline bool is_root_cache(struct kmem_cache *s)
{
	return s->memcg_params.is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * Memcg caches get a suffix appended to their name because no two caches
 * in the system may share a name. When printing them locally, however,
 * it is better to refer to them by the base (root cache) name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note that we protect with RCU only the memcg_caches array, not the
 * per-memcg caches themselves. Hence the caller must ensure the memcg's
 * cache won't go away, either by taking a css reference on the owner
 * cgroup or by holding slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct kmem_cache *s,
					     gfp_t gfp, int order)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return memcg_charge_kmem(s->memcg_params.memcg, gfp, 1 << order);
}

static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
	if (!memcg_kmem_enabled())
		return;
	if (is_root_cache(s))
		return;
	memcg_uncharge_kmem(s->memcg_params.memcg, 1 << order);
}

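/*
 * Illustrative sketch (simplified from the allocators' page allocation
 * paths): a memcg charge is taken before the backing pages are allocated
 * and dropped again if the allocation fails or when the slab page is
 * eventually freed:
 *
 *	if (memcg_charge_slab(s, flags, order))
 *		return NULL;
 *	page = alloc_pages(flags, order);
 *	if (!page)
 *		memcg_uncharge_slab(s, order);
 */
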
extern void slab_init_memcg_params(struct kmem_cache *);

#else /* !CONFIG_MEMCG_KMEM */

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )
#define for_each_memcg_cache_safe(iter, tmp, root) \
	for ((void)(iter), (void)(tmp), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, cachep->name, s->name);
	WARN_ON_ONCE(1);
	return s;
}

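/*
 * Illustrative sketch (simplified from the kmem_cache_free() paths in
 * SLAB/SLUB): the free path revalidates which cache actually owns the
 * object, so a free against the wrong cache is caught and redirected:
 *
 *	void kmem_cache_free(struct kmem_cache *s, void *x)
 *	{
 *		s = cache_from_obj(s, x);
 *		...
 *	}
 */
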
#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
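
/*
 * Illustrative example: summing the partial slabs of a SLUB cache across
 * all nodes that have a kmem_cache_node allocated:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial += n->nr_partial;
 */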

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#endif /* MM_SLAB_H */