/*
 * linux/mm/slab.h
 * Written by Mark Hemment, 1996.
 * ([email protected])
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#if defined(__KERNEL__)

typedef struct kmem_cache_s kmem_cache_t;

#include <linux/config.h>	/* kmalloc_sizes.h needs CONFIG_ options */
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/types.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */

/* flags for kmem_cache_alloc() */
#define SLAB_NOFS		GFP_NOFS
#define SLAB_NOIO		GFP_NOIO
#define SLAB_ATOMIC		GFP_ATOMIC
#define SLAB_USER		GFP_USER
#define SLAB_KERNEL		GFP_KERNEL
#define SLAB_DMA		GFP_DMA

#define SLAB_LEVEL_MASK		GFP_LEVEL_MASK

#define SLAB_NO_GROW		__GFP_NO_GROW	/* don't grow a cache */

/* flags to pass to kmem_cache_create().
 * The first 3 are only valid when the allocator has been built
 * with SLAB_DEBUG_SUPPORT.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* Perform (expensive) checks on free */
#define SLAB_DEBUG_INITIAL	0x00000200UL	/* Call constructor (as verifier) */
#define SLAB_RED_ZONE		0x00000400UL	/* Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* Poison objects */
#define SLAB_NO_REAP		0x00001000UL	/* never reap from the cache */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* align objs on h/w cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* use GFP_DMA memory */
#define SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* force alignment */
#define SLAB_STORE_USER		0x00010000UL	/* store the last owner for bug hunting */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* track pages allocated to indicate
						   what is reclaimable later */
#define SLAB_PANIC		0x00040000UL	/* panic if kmem_cache_create() fails */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* defer freeing pages to RCU */

/* flags passed to a constructor func */
#define SLAB_CTOR_CONSTRUCTOR	0x001UL		/* if not set, then destructor */
#define SLAB_CTOR_ATOMIC	0x002UL		/* tell constructor it can't sleep */
#define SLAB_CTOR_VERIFY	0x004UL		/* tell constructor it's a verify call */
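
/*
 * A minimal sketch of a constructor that honours the flags above; it is not
 * part of this header, and "struct foo" and foo_ctor() are hypothetical
 * names.  The allocator passes SLAB_CTOR_CONSTRUCTOR when an object is being
 * initialised; if that bit is clear the same callback is acting as a
 * destructor (or as a verifier, see SLAB_CTOR_VERIFY).
 *
 *	static void foo_ctor(void *obj, kmem_cache_t *cachep, unsigned long flags)
 *	{
 *		struct foo *f = obj;
 *
 *		if (flags & SLAB_CTOR_CONSTRUCTOR)
 *			memset(f, 0, sizeof(*f));
 *	}
 */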

/* prototypes */
extern void __init kmem_cache_init(void);

extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
				       void (*)(void *, kmem_cache_t *, unsigned long),
				       void (*)(void *, kmem_cache_t *, unsigned long));
extern int kmem_cache_destroy(kmem_cache_t *);
extern int kmem_cache_shrink(kmem_cache_t *);
extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast);
extern void kmem_cache_free(kmem_cache_t *, void *);
extern unsigned int kmem_cache_size(kmem_cache_t *);
extern const char *kmem_cache_name(kmem_cache_t *);
extern kmem_cache_t *kmem_find_general_cachep(size_t size, unsigned int __nocast gfpflags);
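
/*
 * Hedged usage sketch for the prototypes above (illustration only, not part
 * of this header): a subsystem typically creates its cache once at init time,
 * then allocates and frees objects from it.  "foo", foo_cachep and foo_ctor()
 * are hypothetical names; passing SLAB_PANIC would make the explicit failure
 * check unnecessary.
 *
 *	static kmem_cache_t *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
 *	if (!foo_cachep)
 *		panic("could not create foo cache");
 *
 *	struct foo *f = kmem_cache_alloc(foo_cachep, SLAB_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 *	kmem_cache_destroy(foo_cachep);
 */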

/* Size description struct for general caches. */
struct cache_sizes {
	size_t		 cs_size;
	kmem_cache_t	*cs_cachep;
	kmem_cache_t	*cs_dmacachep;
};
extern struct cache_sizes malloc_sizes[];
extern void *__kmalloc(size_t, unsigned int __nocast);
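
/*
 * For a compile-time constant size, kmalloc() below picks the matching general
 * cache at compile time: kmalloc_sizes.h expands to one CACHE(x) entry per
 * supported size class (which is why the header needs the CONFIG_ options,
 * PAGE_SIZE and L1_CACHE_BYTES included above), and each expansion of the
 * local CACHE() macro either jumps to "found" with the current index i or
 * moves on to the next, larger class.  Roughly (the exact size list depends
 * on the architecture and configuration):
 *
 *	if (size <= 32) goto found; else i++;
 *	if (size <= 64) goto found; else i++;
 *	...
 *
 * A constant size larger than every class calls the deliberately undefined
 * __you_cannot_kmalloc_that_much() and so fails at link time.  Non-constant
 * sizes fall through to __kmalloc().
 */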
static inline void *kmalloc(size_t size, unsigned int __nocast flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
		return kmem_cache_alloc((flags & GFP_DMA) ?
			malloc_sizes[i].cs_dmacachep :
			malloc_sizes[i].cs_cachep, flags);
	}
	return __kmalloc(size, flags);
}

extern void *kcalloc(size_t, size_t, unsigned int __nocast);
extern void kfree(const void *);
extern unsigned int ksize(const void *);
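
/*
 * Illustrative sketch only (the buffers and their sizes are hypothetical):
 * kmalloc()/kcalloc() pair with kfree(), and kcalloc() returns zeroed memory
 * sized for an array of n elements.  ksize() reports the usable size of an
 * allocation, which may be larger than what was requested.
 *
 *	char *buf = kmalloc(128, GFP_KERNEL);
 *	u32 *table = kcalloc(16, sizeof(*table), GFP_ATOMIC);
 *
 *	if (buf)
 *		printk(KERN_DEBUG "usable size %u\n", ksize(buf));
 *	kfree(buf);
 *	kfree(table);
 */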

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node(kmem_cache_t *, int flags, int node);
extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node);
#else
static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
{
	return kmem_cache_alloc(cachep, flags);
}
static inline void *kmalloc_node(size_t size, unsigned int __nocast flags, int node)
{
	return kmalloc(size, flags);
}
#endif
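
/*
 * Sketch of the node-aware variants (hypothetical caller; numa_node_id() is
 * assumed to come from the topology headers): on CONFIG_NUMA kernels the
 * allocation is attempted on the requested node, while on non-NUMA kernels
 * the node argument is simply ignored and the calls fall back to the plain
 * versions above.
 *
 *	struct foo *f = kmalloc_node(sizeof(*f), GFP_KERNEL, numa_node_id());
 */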

extern int FASTCALL(kmem_cache_reap(int));
extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));

/* System wide caches */
extern kmem_cache_t	*vm_area_cachep;
extern kmem_cache_t	*names_cachep;
extern kmem_cache_t	*files_cachep;
extern kmem_cache_t	*filp_cachep;
extern kmem_cache_t	*fs_cachep;
extern kmem_cache_t	*signal_cachep;
extern kmem_cache_t	*sighand_cachep;
extern kmem_cache_t	*bio_cachep;

extern atomic_t slab_reclaim_pages;

#endif	/* __KERNEL__ */

#endif	/* _LINUX_SLAB_H */