/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/bitops.h>
#include <linux/gfp_types.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/math.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/xarray.h>
#include <linux/local_lock.h>

/* Keep unconverted code working */
#define radix_tree_root		xarray
#define radix_tree_node		xa_node

struct radix_tree_preload {
	local_lock_t lock;
	unsigned nr;
	/* nodes->parent points to next preallocated node */
	struct radix_tree_node *nodes;
};
DECLARE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);

/*
 * The bottom two bits of the slot determine how the remaining bits in the
 * slot are interpreted:
 *
 * 00 - data pointer
 * 10 - internal entry
 * x1 - value entry
 *
 * The internal entry may be a pointer to the next level in the tree, a
 * sibling entry, or an indicator that the entry in this slot has been moved
 * to another location in the tree and the lookup should be restarted.  While
 * NULL fits the 'data pointer' pattern, it means that there is no entry in
 * the tree for this index (no matter what level of the tree it is found at).
 * This means that storing a NULL entry in the tree is the same as deleting
 * the entry from the tree.
 */
#define RADIX_TREE_ENTRY_MASK		3UL
#define RADIX_TREE_INTERNAL_NODE	2UL

static inline bool radix_tree_is_internal_node(void *ptr)
{
	return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) ==
				RADIX_TREE_INTERNAL_NODE;
}

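/*
 * Example (illustrative sketch, not part of the original header): the
 * implementation stores node pointers with RADIX_TREE_INTERNAL_NODE set
 * in the low bits, which radix_tree_is_internal_node() detects; see
 * node_to_entry() in lib/radix-tree.c for the canonical helper.
 *
 *	void *entry = (void *)((unsigned long)node |
 *			       RADIX_TREE_INTERNAL_NODE);
 *
 *	radix_tree_is_internal_node(entry)	is true
 *	radix_tree_is_internal_node(NULL)	is false: an empty slot
 *	radix_tree_is_internal_node(item)	is false for a kmalloc()ed
 *						item, whose alignment
 *						guarantees low bits of 00
 */
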
/*** radix-tree API starts here ***/

#define RADIX_TREE_MAP_SHIFT	XA_CHUNK_SHIFT
#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)

#define RADIX_TREE_MAX_TAGS	XA_MAX_MARKS
#define RADIX_TREE_TAG_LONGS	XA_MARK_LONGS

#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
					  RADIX_TREE_MAP_SHIFT))

/* The IDR tag is stored in the low bits of xa_flags */
#define ROOT_IS_IDR	((__force gfp_t)4)
/* The top bits of xa_flags are used to store the root tags */
#define ROOT_TAG_SHIFT	(__GFP_BITS_SHIFT)

#define RADIX_TREE_INIT(name, mask)	XARRAY_INIT(name, mask)

#define RADIX_TREE(name, mask) \
	struct radix_tree_root name = RADIX_TREE_INIT(name, mask)

#define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask)

static inline bool radix_tree_empty(const struct radix_tree_root *root)
{
	return root->xa_head == NULL;
}

/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:	index of current slot
 * @next_index:	one beyond the last index for this chunk
 * @tags:	bit-mask for tag-iterating
 * @node:	node that contains current slot
 *
 * This radix tree iterator works in terms of "chunks" of slots.  A chunk is a
 * subinterval of slots contained within one radix tree leaf node.  It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size.  For tagged
 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
	unsigned long	index;
	unsigned long	next_index;
	unsigned long	tags;
	struct radix_tree_node *node;
};

/**
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronization (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items being stored in the tree, and
 * management of their lifetimes, is entirely the responsibility of API users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * __radix_tree_lookup
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 7 functions can be called locklessly, using RCU.  The caller must
 * ensure calls to these functions are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items.  So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the radix tree *and* a synchronize_rcu() grace
 * period).
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix tree)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied upon
 * if only the RCU read lock is held.  Functions to set/clear tags and to
 * delete nodes running concurrently with it may affect its result such that
 * two consecutive reads in the same locked section may return different
 * values.  If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged can be called without locking or RCU.
 */

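/*
 * Example: a minimal RCU-side lookup (illustrative sketch; "my_tree" and
 * use_item() are hypothetical).  This assumes the stored items are freed
 * only after an RCU grace period, per the rules above:
 *
 *	struct item *item;
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&my_tree, index);
 *	if (item)
 *		use_item(item);
 *	rcu_read_unlock();
 *
 * The item is only guaranteed to remain valid until rcu_read_unlock()
 * unless it carries its own reference count.
 */
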
/**
 * radix_tree_deref_slot - dereference a slot
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
 * locked across slot lookup and dereference.  Not required if write lock is
 * held (ie. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer if
 * only the read lock is held.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot(void __rcu **slot)
{
	return rcu_dereference(*slot);
}

/**
 * radix_tree_deref_slot_protected - dereference a slot with tree lock held
 * @slot: slot pointer, returned by radix_tree_lookup_slot
 * @treelock: lock protecting the tree, held by the caller
 *
 * Similar to radix_tree_deref_slot.  The caller does not hold the RCU read
 * lock but it must hold the tree lock to prevent parallel updates.
 *
 * Return: entry stored in that slot.
 */
static inline void *radix_tree_deref_slot_protected(void __rcu **slot,
							spinlock_t *treelock)
{
	return rcu_dereference_protected(*slot, lockdep_is_held(treelock));
}

/**
 * radix_tree_deref_retry - check radix_tree_deref_slot
 * @arg: pointer returned by radix_tree_deref_slot
 * Returns: 0 if retry is not required, otherwise retry is required
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
	return unlikely(radix_tree_is_internal_node(arg));
}

/**
 * radix_tree_exception - radix_tree_deref_slot returned either exception?
 * @arg: value returned by radix_tree_deref_slot
 * Returns: 0 if well-aligned pointer, non-0 if either kind of exception.
 */
static inline int radix_tree_exception(void *arg)
{
	return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}

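/*
 * Example: the lookup_slot + deref_slot pattern under rcu_read_lock()
 * (illustrative sketch; "my_tree" is hypothetical).  A concurrent delete
 * may move the slot's contents, in which case radix_tree_deref_retry()
 * reports that the lookup must be restarted:
 *
 *	void __rcu **slot;
 *	void *item;
 *
 *	rcu_read_lock();
 * repeat:
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	item = slot ? radix_tree_deref_slot(slot) : NULL;
 *	if (radix_tree_deref_retry(item))
 *		goto repeat;
 *	rcu_read_unlock();
 */
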
int radix_tree_insert(struct radix_tree_root *, unsigned long index,
			void *);
void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index,
			  struct radix_tree_node **nodep, void __rcu ***slotp);
void *radix_tree_lookup(const struct radix_tree_root *, unsigned long);
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *,
					unsigned long index);
void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *,
			  void __rcu **slot, void *entry);
void radix_tree_iter_replace(struct radix_tree_root *,
		const struct radix_tree_iter *, void __rcu **slot, void *entry);
void radix_tree_replace_slot(struct radix_tree_root *,
			     void __rcu **slot, void *entry);
void radix_tree_iter_delete(struct radix_tree_root *,
			struct radix_tree_iter *iter, void __rcu **slot);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int radix_tree_gang_lookup(const struct radix_tree_root *,
			void **results, unsigned long first_index,
			unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *,
			unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *,
			unsigned long index, unsigned int tag);
int radix_tree_tag_get(const struct radix_tree_root *,
			unsigned long index, unsigned int tag);
void radix_tree_iter_tag_clear(struct radix_tree_root *,
			const struct radix_tree_iter *iter, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *,
		void **results, unsigned long first_index,
		unsigned int max_items, unsigned int tag);
unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *,
		void __rcu ***results, unsigned long first_index,
		unsigned int max_items, unsigned int tag);
int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);

static inline void radix_tree_preload_end(void)
{
	local_unlock(&radix_tree_preloads.lock);
}

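/*
 * Example: the usual preload pattern (illustrative sketch; my_tree,
 * my_lock and item are hypothetical).  radix_tree_preload() fills the
 * per-CPU node pool with GFP_KERNEL allocations and returns with the
 * pool's local lock held, so the insertion done under the spinlock
 * cannot fail for lack of memory:
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	spin_lock(&my_lock);
 *	err = radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *	radix_tree_preload_end();
 */
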
void __rcu **idr_get_free(struct radix_tree_root *root,
			      struct radix_tree_iter *iter, gfp_t gfp,
			      unsigned long max);

enum {
	RADIX_TREE_ITER_TAG_MASK = 0x0f,	/* tag index in lower nybble */
	RADIX_TREE_ITER_TAGGED   = 0x10,	/* lookup tagged slots */
	RADIX_TREE_ITER_CONTIG   = 0x20,	/* stop at first hole */
};

/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:	pointer to iterator state
 * @start:	iteration starting index
 * Returns:	NULL
 */
static __always_inline void __rcu **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
	/*
	 * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it
	 * in the case of a successful tagged chunk lookup.  If the lookup was
	 * unsuccessful or non-tagged then nobody cares about ->tags.
	 *
	 * Set index to zero to bypass next_index overflow protection.
	 * See the comment in radix_tree_next_chunk() for details.
	 */
	iter->index = 0;
	iter->next_index = start;
	return NULL;
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to the chunk's first slot, or NULL if there are no
 *		more left
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index.  It returns a pointer to the chunk's first slot.
 * It also fills @iter with data about the chunk: its position in the tree
 * (index), its end (next_index), and a bit mask for tagged iteration (tags).
 */
void __rcu **radix_tree_next_chunk(const struct radix_tree_root *,
			     struct radix_tree_iter *iter, unsigned flags);

/**
 * radix_tree_iter_lookup - look up an index in the radix tree
 * @root: radix tree root
 * @iter: iterator state
 * @index: key to look up
 *
 * If @index is present in the radix tree, this function returns the slot
 * containing it and updates @iter to describe the entry.  If @index is not
 * present, it returns NULL.
 */
static inline void __rcu **
radix_tree_iter_lookup(const struct radix_tree_root *root,
			struct radix_tree_iter *iter, unsigned long index)
{
	radix_tree_iter_init(iter, index);
	return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG);
}

/**
 * radix_tree_iter_retry - retry this chunk of the iteration
 * @iter:	iterator state
 *
 * If we iterate over a tree protected only by the RCU lock, a race
 * against deletion or creation may result in seeing a slot for which
 * radix_tree_deref_retry() returns true.  If so, call this function
 * and continue the iteration.
 */
static inline __must_check
void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}

static inline unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
	return iter->index + slots;
}

/**
 * radix_tree_iter_resume - resume iterating when the chunk may be invalid
 * @slot: pointer to current slot
 * @iter: iterator state
 * Returns: New slot pointer
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion.  Call this function
 * before releasing the lock to continue the iteration from the next index.
 */
void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot,
			struct radix_tree_iter *iter);

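/*
 * Example: dropping the tree lock in mid-iteration (illustrative sketch;
 * my_tree, my_lock and process() are hypothetical).  Calling
 * radix_tree_iter_resume() first makes the iterator fetch a fresh chunk
 * on the next pass, so insertions or deletions made while the lock was
 * dropped cannot leave it pointing at stale slots:
 *
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		process(radix_tree_deref_slot_protected(slot, &my_lock));
 *		if (need_resched()) {
 *			slot = radix_tree_iter_resume(slot, &iter);
 *			spin_unlock(&my_lock);
 *			cond_resched();
 *			spin_lock(&my_lock);
 *		}
 *	}
 */
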
/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:	pointer to radix tree iterator
 * Returns:	current chunk size
 */
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
	return iter->next_index - iter->index;
}

/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:	pointer to current slot
 * @iter:	pointer to iterator state
 * @flags:	RADIX_TREE_ITER_*, should be constant
 * Returns:	pointer to next slot, or NULL if there are no more left
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For tagged lookup it also consumes bits of @iter->tags.
 *
 * There are several cases where 'slot' can be passed in as NULL to this
 * function.  These cases result from the use of radix_tree_iter_resume() or
 * radix_tree_iter_retry().  In these cases we don't end up dereferencing
 * 'slot' because either:
 * a) we are doing tagged iteration and iter->tags has been set to 0, or
 * b) we are doing non-tagged iteration, and iter->index and iter->next_index
 *    have been set up so that radix_tree_chunk_size() returns 1 or 0.
 */
static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot,
				struct radix_tree_iter *iter, unsigned flags)
{
	if (flags & RADIX_TREE_ITER_TAGGED) {
		iter->tags >>= 1;
		if (unlikely(!iter->tags))
			return NULL;
		if (likely(iter->tags & 1ul)) {
			iter->index = __radix_tree_iter_add(iter, 1);
			slot++;
			goto found;
		}
		if (!(flags & RADIX_TREE_ITER_CONTIG)) {
			unsigned offset = __ffs(iter->tags);

			iter->tags >>= offset++;
			iter->index = __radix_tree_iter_add(iter, offset);
			slot += offset;
			goto found;
		}
	} else {
		long count = radix_tree_chunk_size(iter);

		while (--count > 0) {
			slot++;
			iter->index = __radix_tree_iter_add(iter, 1);

			if (likely(*slot))
				goto found;
			if (flags & RADIX_TREE_ITER_CONTIG) {
				/* forbid switching to the next chunk */
				iter->next_index = 0;
				break;
			}
		}
	}
	return NULL;

 found:
	return slot;
}

/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
	     slot = radix_tree_next_slot(slot, iter, 0))

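/*
 * Example: RCU-protected iteration over every item (illustrative sketch;
 * my_tree and process() are hypothetical).  When the tree is protected
 * only by rcu_read_lock(), a racing update can leave a slot for which
 * radix_tree_deref_retry() returns true, hence the retry:
 *
 *	void __rcu **slot;
 *	struct radix_tree_iter iter;
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		void *item = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(item)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		process(item, iter.index);
 *	}
 *	rcu_read_unlock();
 */
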
/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @tag:	tag index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
			      RADIX_TREE_ITER_TAGGED | tag)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_TAGGED | tag))

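/*
 * Example: tag items and walk only the tagged slots (illustrative sketch;
 * my_tree, my_lock, MY_TAG and process() are hypothetical, with MY_TAG <
 * RADIX_TREE_MAX_TAGS).  Tag manipulation modifies the tree, so the tree
 * lock is assumed to be held throughout:
 *
 *	radix_tree_tag_set(&my_tree, index, MY_TAG);
 *
 *	radix_tree_for_each_tagged(slot, &my_tree, &iter, 0, MY_TAG) {
 *		process(radix_tree_deref_slot_protected(slot, &my_lock));
 *		radix_tree_iter_tag_clear(&my_tree, &iter, MY_TAG);
 *	}
 */
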
#endif /* _LINUX_RADIX_TREE_H */