/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <[email protected]>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
	    size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module.  Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}

		if (!strcmp(s->name, name)) {
			pr_err("%s (%s): Cache name already exists.\n",
			       __func__, name);
			dump_stack();
			s = NULL;
			return -EINVAL;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though.  If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}
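
/*
 * Worked example (illustrative numbers, assuming cache_line_size() == 64):
 * for a 20-byte object created with SLAB_HWCACHE_ALIGN and align == 0,
 * ralign starts at 64 and is halved while the object fits in half of it:
 * 20 <= 32 shrinks it to 32, then 20 > 16 stops the loop.  The object is
 * thus aligned to 32 bytes, so two objects share a cache line rather than
 * each being padded out to 64 bytes.
 */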

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */

struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	int err = 0;

	get_online_cpus();
	mutex_lock(&slab_mutex);

	if (kmem_cache_sanity_check(name, size) != 0)
		goto out_locked;

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags.  We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_locked;

	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (s) {
		s->object_size = s->size = size;
		s->align = calculate_alignment(flags, align, size);
		s->ctor = ctor;
		s->name = kstrdup(name, GFP_KERNEL);
		if (!s->name) {
			kmem_cache_free(kmem_cache, s);
			err = -ENOMEM;
			goto out_locked;
		}

		err = __kmem_cache_create(s, flags);
		if (!err) {
			s->refcount = 1;
			list_add(&s->list, &slab_caches);
		} else {
			kfree(s->name);
			kmem_cache_free(kmem_cache, s);
		}
	} else
		err = -ENOMEM;

out_locked:
	mutex_unlock(&slab_mutex);
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}

		return NULL;
	}

	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
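
/*
 * Usage sketch (illustrative only; "struct foo" and the other identifiers
 * are hypothetical, not part of this file):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
 *					       0, SLAB_HWCACHE_ALIGN, NULL);
 *		if (!foo_cachep)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 * Objects are then obtained with kmem_cache_alloc(foo_cachep, GFP_KERNEL)
 * and released with kmem_cache_free(foo_cachep, obj).
 */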

void kmem_cache_destroy(struct kmem_cache *s)
{
	get_online_cpus();
	mutex_lock(&slab_mutex);
	s->refcount--;
	if (!s->refcount) {
		list_del(&s->list);

		if (!__kmem_cache_shutdown(s)) {
			mutex_unlock(&slab_mutex);
			if (s->flags & SLAB_DESTROY_BY_RCU)
				rcu_barrier();

			kfree(s->name);
			kmem_cache_free(kmem_cache, s);
		} else {
			list_add(&s->list, &slab_caches);
			mutex_unlock(&slab_mutex);
			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
				s->name);
			dump_stack();
		}
	} else {
		mutex_unlock(&slab_mutex);
	}
	put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
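
/*
 * Matching teardown for the sketch above (again with hypothetical
 * identifiers): the cache may only be destroyed once every object has
 * been freed back to it, typically from the module's exit path:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		kmem_cache_destroy(foo_cachep);
 *	}
 */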

int slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n",
			name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
		unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

#endif /* !CONFIG_SLOB */
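
/*
 * Bootstrap sketch (a simplified illustration, not the verbatim init code
 * of any particular allocator): an allocator's early setup can use
 * create_boot_cache() to bring up the cache that all later kmem_cache
 * structures are allocated from, before kmalloc() itself works:
 *
 *	static struct kmem_cache boot_kmem_cache;
 *
 *	void __init kmem_cache_init(void)
 *	{
 *		kmem_cache = &boot_kmem_cache;
 *		create_boot_cache(kmem_cache, "kmem_cache",
 *				  sizeof(struct kmem_cache), SLAB_HWCACHE_ALIGN);
 *		...
 *	}
 */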

#ifdef CONFIG_SLABINFO
static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	mutex_lock(&slab_mutex);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static int s_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
	return 0;
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};
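
/*
 * A made-up sample line (CONFIG_DEBUG_SLAB disabled), matching the format
 * strings in s_show() above: 64-byte objects, 64 objects per one-page
 * slab, ten slabs, all objects in use:
 *
 *	kmalloc-64  640  640  64  64  1 : tunables 120 60 8 : slabdata 10 10 0
 */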

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.write		= slabinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */