/*
 * 2002-10-18  written by Jim Houston [email protected]
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX in a local pool) so we
 * don't need to go to the memory "store" during an id allocate, just
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
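
/*
 * An illustrative sketch of that usage model (my_idr, my_lock, thing and
 * id are hypothetical names, not part of this file): allocate an id for
 * a pointer, translate the id back later, then release it.  Allocation
 * and removal are serialized by a caller-provided lock; error handling
 * is shown with the allocation functions further below.
 *
 *	static DEFINE_IDR(my_idr);
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);
 *	err = idr_get_new(&my_idr, thing, &id);
 *	spin_unlock(&my_lock);
 *
 *	thing = idr_find(&my_idr, id);
 *
 *	spin_lock(&my_lock);
 *	idr_remove(&my_idr, id);
 *	spin_unlock(&my_lock);
 */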

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return(p);
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
	struct idr_layer *layer;

	layer = container_of(head, struct idr_layer, rcu_head);
	kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
	call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__move_to_free_list(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
	struct idr_layer *p = pa[0];
	int l = 0;

	__set_bit(id & IDR_MASK, &p->bitmap);
	/*
	 * If this layer is full mark the bit in the layer above to
	 * show that this part of the radix tree is full.  This may
	 * complete the layer above and require walking up the radix
	 * tree.
	 */
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		id = id >> IDR_BITS;
		__set_bit((id & IDR_MASK), &p->bitmap);
	}
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return (0);
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	int l, id, oid;
	unsigned long bm;

	id = *starting_id;
 restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (!(p = pa[l])) {
				*starting_id = id;
				return IDR_NEED_TO_GROW;
			}

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return IDR_NOMORE_SPACE;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			new = get_from_free_list(idp);
			if (!new)
				return -1;
			rcu_assign_pointer(p->ary[m], new);
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}

	pa[l] = p;
	return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
			      struct idr_layer **pa)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = get_from_free_list(idp)))
			return -1;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count)
			continue;
		if (!(new = get_from_free_list(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__move_to_free_list(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	rcu_assign_pointer(idp->top, p);
	idp->layers = layers;
	v = sub_alloc(idp, &id, pa);
	if (v == IDR_NEED_TO_GROW)
		goto build_up;
	return(v);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	int id;

	id = idr_get_empty_slot(idp, starting_id, pa);
	if (id >= 0) {
		/*
		 * Successfully found an empty slot.  Install the user
		 * pointer and mark the slot full.
		 */
		rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
				(struct idr_layer *)ptr);
		pa[0]->count++;
		idr_mark_full(pa, id);
	}

	return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);
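
/*
 * A minimal sketch of the retry idiom described above; my_idr, my_lock
 * and obj are hypothetical names, not part of this file.  -EAGAIN means
 * the preallocated layers were consumed (e.g. by a concurrent allocator),
 * so we drop the lock and preallocate again; allocation starts at 1 here
 * so that id 0 stays reserved:
 *
 *	int err, id;
 * again:
 *	if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = idr_get_new_above(&my_idr, obj, 1, &id);
 *	spin_unlock(&my_lock);
 *	if (err == -EAGAIN)
 *		goto again;
 *	if (err)
 *		return err;
 */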

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		id += 1 << n;
		while (n < fls(id)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	rcu_assign_pointer(idp->top, NULL);
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
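
/*
 * A sketch of the clean-up sequence mentioned above; my_idr and free_fn
 * are hypothetical names, not part of this file, and kfree() assumes the
 * stored objects came from kmalloc().  free_fn releases one stored
 * object, then the ids and the cached layers are torn down:
 *
 *	static int free_fn(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_fn, NULL);
 *	idr_remove_all(&my_idr);
 *	idr_destroy(&my_idr);
 */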

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = get_from_free_list(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	n = idp->layers * IDR_BITS;
	p = rcu_dereference(idp->top);

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;

	while (n > 0 && p) {
		n -= IDR_BITS;
		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find);
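
/*
 * A sketch of an RCU-side lookup, assuming the stored objects are freed
 * only after an RCU grace period (my_idr, use_thing and struct my_thing
 * are hypothetical names, not part of this file).  The object must not
 * be used past rcu_read_unlock() unless a reference was taken:
 *
 *	struct my_thing *thing;
 *
 *	rcu_read_lock();
 *	thing = idr_find(&my_idr, id);
 *	if (thing)
 *		use_thing(thing);
 *	rcu_read_unlock();
 */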

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
		 int (*fn)(int id, void *p, void *data), void *data)
{
	int n, id, max, error = 0;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = rcu_dereference(idp->top);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > 0 && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
		}

		if (p) {
			error = fn(id, (void *)p, data);
			if (error)
				break;
		}

		id += 1 << n;
		while (n < fls(id)) {
			n += IDR_BITS;
			p = *--paa;
		}
	}

	return error;
}
EXPORT_SYMBOL(idr_for_each);

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	rcu_assign_pointer(p->ary[n], ptr);

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
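
/*
 * A sketch of replacing a stored pointer (my_idr, new_thing and
 * release_thing are hypothetical names, not part of this file).  The
 * old pointer comes back on success and an ERR_PTR() value on failure,
 * so the result must be checked with IS_ERR() before use:
 *
 *	void *old;
 *
 *	old = idr_replace(&my_idr, new_thing, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	release_thing(old);
 */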

static void idr_cache_ctor(void *idr_layer)
{
	memset(idr_layer, 0, sizeof(struct idr_layer));
}

void __init idr_init_cache(void)
{
	idr_layer_cache = kmem_cache_create("idr_layer_cache",
				sizeof(struct idr_layer), 0, SLAB_PANIC,
				idr_cache_ctor);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
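
/*
 * For statically allocated handles, <linux/idr.h> also provides an
 * initializer, so the following (with a hypothetical name) is
 * equivalent to calling idr_init() on the handle:
 *
 *	static DEFINE_IDR(my_idr);
 */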

/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <[email protected]>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
	unsigned long flags;

	if (!ida->free_bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		if (!ida->free_bitmap) {
			ida->free_bitmap = bitmap;
			bitmap = NULL;
		}
		spin_unlock_irqrestore(&ida->idr.lock, flags);
	}

	kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
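
/*
 * A minimal sketch of the ida retry idiom, parallel to the idr one
 * earlier in this file (my_ida and my_lock are hypothetical names,
 * not part of this file):
 *
 *	int err, id;
 * again:
 *	if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = ida_get_new(&my_ida, &id);
 *	spin_unlock(&my_lock);
 *	if (err == -EAGAIN)
 *		goto again;
 *	if (err)
 *		return err;
 */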

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
	idr_destroy(&ida->idr);
	kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);