#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
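
/*
 * Illustrative sketch (not part of the original header): bootstrap code in an
 * allocator advances slab_state as more of the machinery becomes usable, and
 * later code can gate optional behaviour on it. The helpers named below are
 * hypothetical; the real sequencing lives in each allocator's
 * kmem_cache_init().
 *
 *	void __init example_kmem_cache_init(void)
 *	{
 *		example_setup_boot_caches();	// hypothetical
 *		slab_state = PARTIAL;		// kmem_cache_node usable
 *		example_setup_kmalloc_array();	// hypothetical
 *		slab_state = UP;		// normal allocations work
 *	}
 */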

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);
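
/*
 * Illustrative sketch (not from the kernel sources): a cache-creation path
 * would fold the caller's requested alignment, the cache flags (e.g.
 * SLAB_HWCACHE_ALIGN) and the object size into the alignment actually used:
 *
 *	s->align = calculate_alignment(flags, align, size);
 */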

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
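
/*
 * Illustrative sketch (not from the kernel sources): a kmalloc() fast path
 * can use kmalloc_slab() to map a request size to the backing cache and then
 * allocate from it; ZERO_OR_NULL_PTR covers the zero-size case:
 *
 *	static void *example_kmalloc(size_t size, gfp_t flags)
 *	{
 *		struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 *		if (unlikely(ZERO_OR_NULL_PTR(s)))
 *			return s;
 *		return kmem_cache_alloc(s, flags);
 *	}
 */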


/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);
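
/*
 * Illustrative sketch (not from the kernel sources): create_boot_cache() is
 * for the earliest boot caches, whose struct kmem_cache must come from
 * statically allocated storage because no cache exists yet to allocate it
 * from. The variable below is hypothetical:
 *
 *	static struct kmem_cache example_boot_kmem_cache;
 *
 *	create_boot_cache(&example_boot_kmem_cache, "kmem_cache",
 *			  sizeof(struct kmem_cache), SLAB_HWCACHE_ALIGN);
 *	kmem_cache = &example_boot_kmem_cache;
 */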

struct mem_cgroup;
#ifdef CONFIG_SLUB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif
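
/*
 * Illustrative sketch (not from the kernel sources): the common cache
 * creation path can ask the allocator for a compatible existing cache before
 * building a new one; a NULL return (always the case outside SLUB) means no
 * alias was found and a fresh cache has to be created:
 *
 *	s = __kmem_cache_alias(name, size, align, flags, ctor);
 *	if (s)
 *		goto out_unlock;	// reuse the mergeable cache
 */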


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
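
/*
 * Illustrative sketch (not from the kernel sources): the creation path can
 * use the mask above to silently drop flag bits that callers are not allowed
 * to pass for the compiled-in allocator:
 *
 *	flags &= CACHE_CREATE_MASK;
 */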

int __kmem_cache_shutdown(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
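
/*
 * Illustrative sketch (not from the kernel sources): a /proc/slabinfo style
 * show routine fills a struct slabinfo via get_slabinfo() and prints one
 * cache per line (the format string is abbreviated here):
 *
 *	struct slabinfo sinfo;
 *
 *	memset(&sinfo, 0, sizeof(sinfo));
 *	get_slabinfo(s, &sinfo);
 *	seq_printf(m, "%-17s %6lu %6lu ...", cache_name(s),
 *		   sinfo.active_objs, sinfo.num_objs);
 *	slabinfo_show_stats(m, s);
 */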

#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return (p == s) ||
		(s->memcg_params && (p == s->memcg_params->root_cache));
}

/*
 * Per-memcg caches carry a suffix in their name because no two caches in the
 * system may share the same name. When printing them locally, though, it is
 * better to refer to them by the base (root cache) name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		return s->memcg_params->root_cache->name;
	return s->name;
}

/*
 * Note: RCU protects only the memcg_caches array, not the per-memcg caches
 * themselves, so the caller must ensure that the memcg's cache will not go
 * away. Since a memcg's cache, once created, is destroyed only together with
 * its root cache, this holds whenever we are about to allocate from the cache
 * or otherwise hold a reference to the root cache. Otherwise, either the
 * slab_mutex or the memcg's slab_caches_mutex must be held while calling this
 * function and using the returned value.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_params *params;

	if (!s->memcg_params)
		return NULL;

	rcu_read_lock();
	params = rcu_dereference(s->memcg_params);
	cachep = params->memcg_caches[idx];
	rcu_read_unlock();

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_register_cache()).
	 */
	smp_read_barrier_depends();
	return cachep;
}
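
/*
 * Illustrative sketch (not from the kernel sources): a charged allocation
 * path can redirect from the root cache to the current memcg's clone by
 * index; memcg_cache_id() is assumed here to return that per-memcg index:
 *
 *	struct kmem_cache *memcg_cachep;
 *
 *	memcg_cachep = cache_from_memcg_idx(root_cache, memcg_cache_id(memcg));
 *	if (memcg_cachep)
 *		return memcg_cachep;	// allocate from the per-memcg clone
 */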

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params->root_cache;
}

static __always_inline int memcg_charge_slab(struct kmem_cache *s,
					     gfp_t gfp, int order)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return __memcg_charge_slab(s, gfp, order);
}

static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
	if (!memcg_kmem_enabled())
		return;
	if (is_root_cache(s))
		return;
	__memcg_uncharge_slab(s, order);
}
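
/*
 * Illustrative sketch (not from the kernel sources): the two helpers above
 * are meant to bracket the page allocation for a new slab, charging the
 * owning memcg up front and undoing the charge if the pages cannot be
 * obtained:
 *
 *	if (memcg_charge_slab(s, flags, order))
 *		return NULL;			// charge refused
 *	page = alloc_pages(flags, order);
 *	if (!page)
 *		memcg_uncharge_slab(s, order);	// give the charge back
 */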
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
}
#endif

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not in use, both assignments below would yield the
	 * same value, but we don't want to pay the price of the assignments
	 * in that case. If kmemcg is not compiled in, the compiler should be
	 * smart enough to elide the assignments entirely; slab_equal_or_root
	 * is then a constant as well.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, cachep->name, s->name);
	WARN_ON_ONCE(1);
	return s;
}
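
/*
 * Illustrative sketch (not from the kernel sources): an allocator's free path
 * can run the object through cache_from_obj() so that a free directed at a
 * root cache ends up in the memcg clone that really owns the object (and so
 * that debugging catches frees to an entirely wrong cache):
 *
 *	void example_kmem_cache_free(struct kmem_cache *s, void *x)
 *	{
 *		s = cache_from_obj(s, x);
 *		example_do_free(s, x);		// hypothetical internal free
 *	}
 */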

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __n = get_node(__s, __node), __node < nr_node_ids; __node++) \
		 if (__n)
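
/*
 * Illustrative sketch (not from the kernel sources): walking every node of a
 * cache with the iterator above; the per-node accounting shown uses the
 * SLUB-only nr_partial field:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr_partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr_partial += n->nr_partial;
 */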

#endif

void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);

#endif /* MM_SLAB_H */