#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter <[email protected]>
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	atomic_long_t nr_slabs;
	struct list_head partial;
	struct list_head full;
};

/*
 * Slab cache management.
 */
struct kmem_cache {
	/* Used for retrieving partial slabs etc */
	unsigned long flags;
	int size;		/* The size of an object including meta data */
	int objsize;		/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	int order;

	/*
	 * Avoid an extra cache line for UP, SMP and for the node local to
	 * struct kmem_cache.
	 */
	struct kmem_cache_node local_node;

	/* Allocation and freeing of slabs */
	int objects;		/* Number of objects in slab */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *, struct kmem_cache *, unsigned long);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
	struct kobject kobj;	/* For sysfs */

#ifdef CONFIG_NUMA
	int defrag_ratio;
	struct kmem_cache_node *node[MAX_NUMNODES];
#endif
	struct page *cpu_slab[NR_CPUS];
};
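
/*
 * Illustrative relationship between the layout fields above (values are
 * indicative only and depend on the architecture, alignment and debug
 * options): for a 96 byte object with no constructor and no debugging
 * one would typically see
 *
 *	objsize = 96	payload as requested by the creator of the cache
 *	inuse   = 96	metadata, if any, would start here
 *	size    = 96	total footprint of one object within the slab
 *	offset  = 0	free pointer overlays the first word of a free object
 *
 * so that objsize <= inuse <= size always holds.
 */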

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
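
/*
 * For example (illustrative only): with ARCH_KMALLOC_MINALIGN undefined
 * or defined as 8 or less, KMALLOC_MIN_SIZE is 8 and KMALLOC_SHIFT_LOW is
 * ilog2(8) == 3; on a hypothetical architecture that defines
 * ARCH_KMALLOC_MINALIGN as 16, they would be 16 and ilog2(16) == 4.
 */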

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];

/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size > KMALLOC_MAX_SIZE)
		return -1;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;
	if (size <= 8) return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	if (size <= 1024) return 10;
	if (size <= 2 * 1024) return 11;
	if (size <= 4 * 1024) return 12;
	if (size <= 8 * 1024) return 13;
	if (size <= 16 * 1024) return 14;
	if (size <= 32 * 1024) return 15;
	if (size <= 64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	if (size <= 4 * 1024 * 1024) return 22;
	if (size <= 8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 *	int i;
 *	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *		if (size <= (1 << i))
 *			return i;
 */
}
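
/*
 * A few worked examples of the mapping above (assuming KMALLOC_MIN_SIZE
 * is 8, i.e. KMALLOC_SHIFT_LOW == 3):
 *
 *	kmalloc_index(0)    == 0	(zero-sized request)
 *	kmalloc_index(8)    == 3	(smallest general cache)
 *	kmalloc_index(96)   == 1	(special 96 byte cache)
 *	kmalloc_index(100)  == 7	(rounded up to the 128 byte cache)
 *	kmalloc_index(192)  == 2	(special 192 byte cache)
 *	kmalloc_index(4096) == 12
 */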

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	/*
	 * This function only gets expanded if __builtin_constant_p(size), so
	 * testing it here shouldn't be needed. But some versions of gcc need
	 * help.
	 */
	if (__builtin_constant_p(size) && index < 0) {
		/*
		 * Generate a link failure. Would be great if we could
		 * do something to stop the compile here.
		 */
		extern void __kmalloc_size_too_large(void);
		__kmalloc_size_too_large();
	}
	return &kmalloc_caches[index];
}
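
/*
 * For instance, with the index mapping above, kmalloc_slab(100) resolves
 * to &kmalloc_caches[7] (the 128 byte cache), kmalloc_slab(0) returns
 * NULL, and a constant size above KMALLOC_MAX_SIZE produces a link-time
 * error via __kmalloc_size_too_large().
 */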

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA 0
#endif

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

static inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc(s, flags);
	} else
		return __kmalloc(size, flags);
}
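
/*
 * Rough sketch of how the two paths above behave (assuming
 * KMALLOC_MIN_SIZE == 8 and CONFIG_ZONE_DMA enabled):
 *
 *	kmalloc(64, GFP_KERNEL)		constant size: becomes
 *					kmem_cache_alloc(&kmalloc_caches[6], flags)
 *	kmalloc(len, GFP_KERNEL)	non-constant size: calls __kmalloc()
 *	kmalloc(64, GFP_DMA)		DMA request: also calls __kmalloc()
 *	kmalloc(0, GFP_KERNEL)		returns ZERO_SIZE_PTR, no allocation
 */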

static inline void *kzalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_zalloc(s, flags);
	} else
		return __kzalloc(size, flags);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node(s, flags, node);
	} else
		return __kmalloc_node(size, flags, node);
}
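
/*
 * Illustrative use of the node-aware variant (numa_node_id() is assumed
 * to be provided by the usual topology headers):
 *
 *	void *buf = kmalloc_node(256, GFP_KERNEL, numa_node_id());
 *	if (!buf)
 *		return -ENOMEM;
 *
 * The buffer is freed with kfree() like any other kmalloc allocation.
 */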
#endif

#endif /* _LINUX_SLUB_DEF_H */