/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 * Copyright (C) 2016 Intel, Matthew Wilcox
 * Copyright (C) 2016 Intel, Ross Zwisler
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/radix-tree.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/rcupdate.h>
#include <linux/preempt.h>		/* in_interrupt() */


/* Number of nodes in fully populated tree of given height */
static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;

/*
 * Radix tree node cache.
 */
static struct kmem_cache *radix_tree_node_cachep;

/*
 * The radix tree is variable-height, so an insert operation not only has
 * to build the branch to its corresponding item, it also has to build the
 * branch to existing items if the size has to be increased (by
 * radix_tree_extend).
 *
 * The worst case is a zero height tree with just a single item at index 0,
 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
 * Hence:
 */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)

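/*
 * Worked example (a sketch, assuming 64-bit longs and the common
 * RADIX_TREE_MAP_SHIFT of 6): RADIX_TREE_MAX_PATH is
 * DIV_ROUND_UP(64, 6) == 11, so RADIX_TREE_PRELOAD_SIZE comes to
 * 11 * 2 - 1 == 21 preallocated nodes per CPU.
 */
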
/*
 * Per-cpu pool of preloaded nodes
 */
struct radix_tree_preload {
	unsigned nr;
	/* nodes->private_data points to next preallocated node */
	struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };

static inline struct radix_tree_node *entry_to_node(void *ptr)
{
	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
}

static inline void *node_to_entry(void *ptr)
{
	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}

#define RADIX_TREE_RETRY	node_to_entry(NULL)

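/*
 * Illustrative sketch (not from the original source): slab-allocated
 * nodes are at least word aligned, so their low bits are free to carry
 * the RADIX_TREE_INTERNAL_NODE tag.  The two helpers above are inverses:
 *
 *	struct radix_tree_node *n = ...;
 *	void *entry = node_to_entry(n);
 *
 *	BUG_ON(!radix_tree_is_internal_node(entry));
 *	BUG_ON(entry_to_node(entry) != n);
 */
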
#ifdef CONFIG_RADIX_TREE_MULTIORDER
/* Sibling slots point directly to another slot in the same node */
static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node)
{
	void **ptr = node;
	return (parent->slots <= ptr) &&
			(ptr < parent->slots + RADIX_TREE_MAP_SIZE);
}
#else
static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node)
{
	return false;
}
#endif

static inline unsigned long get_slot_offset(struct radix_tree_node *parent,
						 void **slot)
{
	return slot - parent->slots;
}

static unsigned int radix_tree_descend(struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned long index)
{
	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
	void **entry = rcu_dereference_raw(parent->slots[offset]);

#ifdef CONFIG_RADIX_TREE_MULTIORDER
	if (radix_tree_is_internal_node(entry)) {
		if (is_sibling_entry(parent, entry)) {
			void **sibentry = (void **) entry_to_node(entry);
			offset = get_slot_offset(parent, sibentry);
			entry = rcu_dereference_raw(*sibentry);
		}
	}
#endif

	*nodep = (void *)entry;
	return offset;
}

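/*
 * Example (a sketch, assuming RADIX_TREE_MAP_SHIFT == 6): for a node at
 * shift 12 (the third level up from the leaves), radix_tree_descend()
 * picks slot (index >> 12) & 63 and returns the entry found there,
 * following one sibling indirection for multiorder entries.
 */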

static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
{
	return root->gfp_mask & __GFP_BITS_MASK;
}

static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__set_bit(offset, node->tags[tag]);
}

static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__clear_bit(offset, node->tags[tag]);
}

static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	return test_bit(offset, node->tags[tag]);
}

static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
{
	root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
}

static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
	root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
}

static inline void root_tag_clear_all(struct radix_tree_root *root)
{
	root->gfp_mask &= __GFP_BITS_MASK;
}

static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
{
	return (__force int)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
}

static inline unsigned root_tags_get(struct radix_tree_root *root)
{
	return (__force unsigned)root->gfp_mask >> __GFP_BITS_SHIFT;
}

/*
 * Returns 1 if any slot in the node has this tag set.
 * Otherwise returns 0.
 */
static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
{
	unsigned idx;
	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
		if (node->tags[tag][idx])
			return 1;
	}
	return 0;
}

/**
 * radix_tree_find_next_bit - find the next set bit in a memory region
 *
 * @node:	the node whose tag bitmap to search
 * @tag:	the tag index
 * @offset:	the bitnumber to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
 * Returns next bit offset, or RADIX_TREE_MAP_SIZE if nothing found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
			 unsigned long offset)
{
	const unsigned long *addr = node->tags[tag];

	if (offset < RADIX_TREE_MAP_SIZE) {
		unsigned long tmp;

		addr += offset / BITS_PER_LONG;
		tmp = *addr >> (offset % BITS_PER_LONG);
		if (tmp)
			return __ffs(tmp) + offset;
		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
		while (offset < RADIX_TREE_MAP_SIZE) {
			tmp = *++addr;
			if (tmp)
				return __ffs(tmp) + offset;
			offset += BITS_PER_LONG;
		}
	}
	return RADIX_TREE_MAP_SIZE;
}

static unsigned int iter_offset(const struct radix_tree_iter *iter)
{
	return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK;
}

/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
	return (RADIX_TREE_MAP_SIZE << shift) - 1;
}

static inline unsigned long node_maxindex(struct radix_tree_node *node)
{
	return shift_maxindex(node->shift);
}

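/*
 * Worked example (a sketch, assuming RADIX_TREE_MAP_SIZE == 64):
 * shift_maxindex(0) == 63 for a leaf node, and shift_maxindex(6) ==
 * (64 << 6) - 1 == 4095 for the level above it.
 */
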
#ifndef __KERNEL__
static void dump_node(struct radix_tree_node *node, unsigned long index)
{
	unsigned long i;

	pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n",
		node, node->offset, index, index | node_maxindex(node),
		node->parent,
		node->tags[0][0], node->tags[1][0], node->tags[2][0],
		node->shift, node->count, node->exceptional);

	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		unsigned long first = index | (i << node->shift);
		unsigned long last = first | ((1UL << node->shift) - 1);
		void *entry = node->slots[i];
		if (!entry)
			continue;
		if (entry == RADIX_TREE_RETRY) {
			pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n",
					i, first, last, node);
		} else if (!radix_tree_is_internal_node(entry)) {
			pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n",
					entry, i, first, last, node);
		} else if (is_sibling_entry(node, entry)) {
			pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n",
					entry, i, first, last, node,
					*(void **)entry_to_node(entry));
		} else {
			dump_node(entry_to_node(entry), first);
		}
	}
}

/* For debug */
static void radix_tree_dump(struct radix_tree_root *root)
{
	pr_debug("radix root: %p rnode %p tags %x\n",
			root, root->rnode,
			root->gfp_mask >> __GFP_BITS_SHIFT);
	if (!radix_tree_is_internal_node(root->rnode))
		return;
	dump_node(entry_to_node(root->rnode), 0);
}
#endif

/*
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(struct radix_tree_root *root,
			struct radix_tree_node *parent,
			unsigned int shift, unsigned int offset,
			unsigned int count, unsigned int exceptional)
{
	struct radix_tree_node *ret = NULL;
	gfp_t gfp_mask = root_gfp_mask(root);

	/*
	 * Preload code isn't irq safe and it doesn't make sense to use
	 * preloading during an interrupt anyway as all the allocations have
	 * to be atomic. So just do normal allocation when in interrupt.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Even if the caller has preloaded, try to allocate from the
		 * cache first for the new node to get accounted to the memory
		 * cgroup.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Provided the caller has preloaded here, we will always
		 * succeed in getting a node here (and never reach
		 * kmem_cache_alloc)
		 */
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes;
			rtp->nodes = ret->private_data;
			ret->private_data = NULL;
			rtp->nr--;
		}
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(ret);
		goto out;
	}
	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
	BUG_ON(radix_tree_is_internal_node(ret));
	if (ret) {
		ret->parent = parent;
		ret->shift = shift;
		ret->offset = offset;
		ret->count = count;
		ret->exceptional = exceptional;
	}
	return ret;
}

static void radix_tree_node_rcu_free(struct rcu_head *head)
{
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);

	/*
	 * Must only free zeroed nodes into the slab. We can be left with
	 * non-NULL entries by radix_tree_free_nodes, so clear the entries
	 * and tags here.
	 */
	memset(node->slots, 0, sizeof(node->slots));
	memset(node->tags, 0, sizeof(node->tags));
	INIT_LIST_HEAD(&node->private_list);

	kmem_cache_free(radix_tree_node_cachep, node);
}

static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	/*
	 * Nodes preloaded by one cgroup can be used by another cgroup, so
	 * they should never be accounted to any particular memory cgroup.
	 */
	gfp_mask &= ~__GFP_ACCOUNT;

	preempt_disable();
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < nr) {
		preempt_enable();
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (node == NULL)
			goto out;
		preempt_disable();
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < nr) {
			node->private_data = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	return ret;
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
	/* Warn on non-sensical use... */
	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
}
EXPORT_SYMBOL(radix_tree_preload);

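/*
 * Usage sketch (not from the original source; my_lock, my_tree and item
 * are hypothetical): preload outside the lock, insert with preemption
 * disabled, then re-enable preemption via radix_tree_preload_end():
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	error = radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *	radix_tree_preload_end();
 */
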
/*
 * The same as radix_tree_preload(), except we don't guarantee preloading
 * happens.  We do it if we decide it helps.  On success, return zero with
 * preemption disabled.  On error, return -ENOMEM with preemption not
 * disabled.
 */
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
	if (gfpflags_allow_blocking(gfp_mask))
		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
	/* Preloading doesn't help anything with this gfp mask, skip it */
	preempt_disable();
	return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);

#ifdef CONFIG_RADIX_TREE_MULTIORDER
/*
 * Preload with enough objects to ensure that we can split a single entry
 * of order @old_order into many entries of size @new_order
 */
int radix_tree_split_preload(unsigned int old_order, unsigned int new_order,
				gfp_t gfp_mask)
{
	unsigned top = 1 << (old_order % RADIX_TREE_MAP_SHIFT);
	unsigned layers = (old_order / RADIX_TREE_MAP_SHIFT) -
				(new_order / RADIX_TREE_MAP_SHIFT);
	unsigned nr = 0;

	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	BUG_ON(new_order >= old_order);

	while (layers--)
		nr = nr * RADIX_TREE_MAP_SIZE + 1;
	return __radix_tree_preload(gfp_mask, top * nr);
}
#endif

/*
 * The same as the function above, but preloads the number of nodes
 * required to insert (1 << order) contiguous, naturally-aligned elements.
 */
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
{
	unsigned long nr_subtrees;
	int nr_nodes, subtree_height;

	/* Preloading doesn't help anything with this gfp mask, skip it */
	if (!gfpflags_allow_blocking(gfp_mask)) {
		preempt_disable();
		return 0;
	}

	/*
	 * Calculate number and height of fully populated subtrees it takes to
	 * store (1 << order) elements.
	 */
	nr_subtrees = 1 << order;
	for (subtree_height = 0; nr_subtrees > RADIX_TREE_MAP_SIZE;
			subtree_height++)
		nr_subtrees >>= RADIX_TREE_MAP_SHIFT;

	/*
	 * The worst case is zero height tree with a single item at index 0 and
	 * then inserting items starting at ULONG_MAX - (1 << order).
	 *
	 * This requires RADIX_TREE_MAX_PATH nodes to build branch from root to
	 * 0-index item.
	 */
	nr_nodes = RADIX_TREE_MAX_PATH;

	/* Plus branch to fully populated subtrees. */
	nr_nodes += RADIX_TREE_MAX_PATH - subtree_height;

	/* Root node is shared. */
	nr_nodes--;

	/* Plus nodes required to build subtrees. */
	nr_nodes += nr_subtrees * height_to_maxnodes[subtree_height];

	return __radix_tree_preload(gfp_mask, nr_nodes);
}

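/*
 * Worked example (a sketch, assuming RADIX_TREE_MAP_SHIFT == 6 on a
 * 64-bit machine): for order == 9, nr_subtrees == 512 >> 6 == 8 subtrees
 * of height 1, so nr_nodes == 11 + (11 - 1) - 1 + 8 * 1 == 28 nodes.
 */
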
static unsigned radix_tree_load_root(struct radix_tree_root *root,
		struct radix_tree_node **nodep, unsigned long *maxindex)
{
	struct radix_tree_node *node = rcu_dereference_raw(root->rnode);

	*nodep = node;

	if (likely(radix_tree_is_internal_node(node))) {
		node = entry_to_node(node);
		*maxindex = node_maxindex(node);
		return node->shift + RADIX_TREE_MAP_SHIFT;
	}

	*maxindex = 0;
	return 0;
}

/*
 * Extend a radix tree so it can store key @index.
 */
static int radix_tree_extend(struct radix_tree_root *root,
				unsigned long index, unsigned int shift)
{
	struct radix_tree_node *slot;
	unsigned int maxshift;
	int tag;

	/* Figure out what the shift should be. */
	maxshift = shift;
	while (index > shift_maxindex(maxshift))
		maxshift += RADIX_TREE_MAP_SHIFT;

	slot = root->rnode;
	if (!slot)
		goto out;

	do {
		struct radix_tree_node *node = radix_tree_node_alloc(root,
							NULL, shift, 0, 1, 0);
		if (!node)
			return -ENOMEM;

		/* Propagate the aggregated tag info into the new root */
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
			if (root_tag_get(root, tag))
				tag_set(node, tag, 0);
		}

		BUG_ON(shift > BITS_PER_LONG);
		if (radix_tree_is_internal_node(slot)) {
			entry_to_node(slot)->parent = node;
		} else if (radix_tree_exceptional_entry(slot)) {
			/* Moving an exceptional root->rnode to a node */
			node->exceptional = 1;
		}
		node->slots[0] = slot;
		slot = node_to_entry(node);
		rcu_assign_pointer(root->rnode, slot);
		shift += RADIX_TREE_MAP_SHIFT;
	} while (shift <= maxshift);
out:
	return maxshift + RADIX_TREE_MAP_SHIFT;
}

/**
 *	radix_tree_shrink    -    shrink radix tree to minimum height
 *	@root:		radix tree root
 */
static inline void radix_tree_shrink(struct radix_tree_root *root,
				     radix_tree_update_node_t update_node,
				     void *private)
{
	for (;;) {
		struct radix_tree_node *node = root->rnode;
		struct radix_tree_node *child;

		if (!radix_tree_is_internal_node(node))
			break;
		node = entry_to_node(node);

		/*
		 * If the candidate node has more than one child, or its
		 * child is not at the leftmost slot, or the child is a
		 * multiorder entry, we cannot shrink.
		 */
		if (node->count != 1)
			break;
		child = node->slots[0];
		if (!child)
			break;
		if (!radix_tree_is_internal_node(child) && node->shift)
			break;

		if (radix_tree_is_internal_node(child))
			entry_to_node(child)->parent = NULL;

		/*
		 * We don't need rcu_assign_pointer(), since we are simply
		 * moving the node from one part of the tree to another: if it
		 * was safe to dereference the old pointer to it
		 * (node->slots[0]), it will be safe to dereference the new
		 * one (root->rnode) as far as dependent read barriers go.
		 */
		root->rnode = child;

		/*
		 * We have a dilemma here. The node's slot[0] must not be
		 * NULLed in case there are concurrent lookups expecting to
		 * find the item. However if this was a bottom-level node,
		 * then it may be subject to the slot pointer being visible
		 * to callers dereferencing it. If item corresponding to
		 * slot[0] is subsequently deleted, these callers would expect
		 * their slot to become empty sooner or later.
		 *
		 * For example, lockless pagecache will look up a slot, deref
		 * the page pointer, and if the page has 0 refcount it means it
		 * was concurrently deleted from pagecache so try the deref
		 * again. Fortunately there is already a requirement for logic
		 * to retry the entire slot lookup -- the indirect pointer
		 * problem (replacing direct root node with an indirect pointer
		 * also results in a stale slot). So tag the slot as indirect
		 * to force callers to retry.
		 */
		node->count = 0;
		if (!radix_tree_is_internal_node(child)) {
			node->slots[0] = RADIX_TREE_RETRY;
			if (update_node)
				update_node(node, private);
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
	}
}

static void delete_node(struct radix_tree_root *root,
			struct radix_tree_node *node,
			radix_tree_update_node_t update_node, void *private)
{
	do {
		struct radix_tree_node *parent;

		if (node->count) {
			if (node == entry_to_node(root->rnode))
				radix_tree_shrink(root, update_node, private);
			return;
		}

		parent = node->parent;
		if (parent) {
			parent->slots[node->offset] = NULL;
			parent->count--;
		} else {
			root_tag_clear_all(root);
			root->rnode = NULL;
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);

		node = parent;
	} while (node);
}

/**
 *	__radix_tree_create	-	create a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@order:		index occupies 2^order aligned slots
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Create, if necessary, and return the node and slot for an item
 *	at position @index in the radix tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->rnode is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 *
 *	Returns -ENOMEM, or 0 for success.
 */
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void ***slotp)
{
	struct radix_tree_node *node = NULL, *child;
	void **slot = (void **)&root->rnode;
	unsigned long maxindex;
	unsigned int shift, offset = 0;
	unsigned long max = index | ((1UL << order) - 1);

	shift = radix_tree_load_root(root, &child, &maxindex);

	/* Make sure the tree is high enough. */
	if (order > 0 && max == ((1UL << order) - 1))
		max++;
	if (max > maxindex) {
		int error = radix_tree_extend(root, max, shift);
		if (error < 0)
			return error;
		shift = error;
		child = root->rnode;
	}

	while (shift > order) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node. */
			child = radix_tree_node_alloc(root, node, shift,
							offset, 0, 0);
			if (!child)
				return -ENOMEM;
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		/* Go a level down */
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);
		slot = &node->slots[offset];
	}

	if (nodep)
		*nodep = node;
	if (slotp)
		*slotp = slot;
	return 0;
}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
/*
 * Free any nodes below this node.  The tree is presumed to not need
 * shrinking, and any user data in the tree is presumed to not need a
 * destructor called on it.  If we need to add a destructor, we can
 * add that functionality later.  Note that we may not clear tags or
 * slots from the tree as an RCU walker may still have a pointer into
 * this subtree.  We could replace the entries with RADIX_TREE_RETRY,
 * but we'll still have to clear those in rcu_free.
 */
static void radix_tree_free_nodes(struct radix_tree_node *node)
{
	unsigned offset = 0;
	struct radix_tree_node *child = entry_to_node(node);

	for (;;) {
		void *entry = child->slots[offset];
		if (radix_tree_is_internal_node(entry) &&
				!is_sibling_entry(child, entry)) {
			child = entry_to_node(entry);
			offset = 0;
			continue;
		}
		offset++;
		while (offset == RADIX_TREE_MAP_SIZE) {
			struct radix_tree_node *old = child;
			offset = child->offset + 1;
			child = child->parent;
			WARN_ON_ONCE(!list_empty(&old->private_list));
			radix_tree_node_free(old);
			if (old == entry_to_node(node))
				return;
		}
	}
}

static inline int insert_entries(struct radix_tree_node *node, void **slot,
				void *item, unsigned order, bool replace)
{
	struct radix_tree_node *child;
	unsigned i, n, tag, offset, tags = 0;

	if (node) {
		if (order > node->shift)
			n = 1 << (order - node->shift);
		else
			n = 1;
		offset = get_slot_offset(node, slot);
	} else {
		n = 1;
		offset = 0;
	}

	if (n > 1) {
		offset = offset & ~(n - 1);
		slot = &node->slots[offset];
	}
	child = node_to_entry(slot);

	for (i = 0; i < n; i++) {
		if (slot[i]) {
			if (replace) {
				node->count--;
				for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
					if (tag_get(node, tag, offset + i))
						tags |= 1 << tag;
			} else
				return -EEXIST;
		}
	}

	for (i = 0; i < n; i++) {
		struct radix_tree_node *old = slot[i];
		if (i) {
			rcu_assign_pointer(slot[i], child);
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
				if (tags & (1 << tag))
					tag_clear(node, tag, offset + i);
		} else {
			rcu_assign_pointer(slot[i], item);
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
				if (tags & (1 << tag))
					tag_set(node, tag, offset);
		}
		if (radix_tree_is_internal_node(old) &&
				!is_sibling_entry(node, old) &&
				(old != RADIX_TREE_RETRY))
			radix_tree_free_nodes(old);
		if (radix_tree_exceptional_entry(old))
			node->exceptional--;
	}
	if (node) {
		node->count += n;
		if (radix_tree_exceptional_entry(item))
			node->exceptional += n;
	}
	return n;
}
#else
static inline int insert_entries(struct radix_tree_node *node, void **slot,
				void *item, unsigned order, bool replace)
{
	if (*slot)
		return -EEXIST;
	rcu_assign_pointer(*slot, item);
	if (node) {
		node->count++;
		if (radix_tree_exceptional_entry(item))
			node->exceptional++;
	}
	return 1;
}
#endif

/**
 *	__radix_tree_insert    -    insert into a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@order:		key covers the 2^order indices around index
 *	@item:		item to insert
 *
 *	Insert an item into the radix tree at position @index.
 */
int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
			unsigned order, void *item)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	BUG_ON(radix_tree_is_internal_node(item));

	error = __radix_tree_create(root, index, order, &node, &slot);
	if (error)
		return error;

	error = insert_entries(node, slot, item, order, false);
	if (error < 0)
		return error;

	if (node) {
		unsigned offset = get_slot_offset(node, slot);
		BUG_ON(tag_get(node, 0, offset));
		BUG_ON(tag_get(node, 1, offset));
		BUG_ON(tag_get(node, 2, offset));
	} else {
		BUG_ON(root_tags_get(root));
	}

	return 0;
}
EXPORT_SYMBOL(__radix_tree_insert);

/**
 *	__radix_tree_lookup	-	lookup an item in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Lookup and return the item at position @index in the radix
 *	tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->rnode is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 */
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
			  struct radix_tree_node **nodep, void ***slotp)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	void **slot;

 restart:
	parent = NULL;
	slot = (void **)&root->rnode;
	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		if (node == RADIX_TREE_RETRY)
			goto restart;
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		slot = parent->slots + offset;
	}

	if (nodep)
		*nodep = parent;
	if (slotp)
		*slotp = slot;
	return node;
}

/**
 *	radix_tree_lookup_slot    -    lookup a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Returns:  the slot corresponding to the position @index in the
 *	radix tree @root. This is useful for update-if-exists operations.
 *
 *	This function can be called under rcu_read_lock iff the slot is not
 *	modified by radix_tree_replace_slot, otherwise it must be called
 *	exclusive from other writers. Any dereference of the slot must be done
 *	using radix_tree_deref_slot.
 */
void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
{
	void **slot;

	if (!__radix_tree_lookup(root, index, NULL, &slot))
		return NULL;
	return slot;
}
EXPORT_SYMBOL(radix_tree_lookup_slot);

/**
 *	radix_tree_lookup    -    perform lookup operation on a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Lookup the item at the position @index in the radix tree @root.
 *
 *	This function can be called under rcu_read_lock, however the caller
 *	must manage lifetimes of leaf nodes (eg. RCU may also be used to free
 *	them safely). No RCU barriers are required to access or modify the
 *	returned item, however.
 */
void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
{
	return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);

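/*
 * Lookup usage sketch (not from the original source; my_tree and the
 * reference-taking helper are hypothetical): readers need only
 * rcu_read_lock() provided they manage the lifetime of the returned
 * item themselves:
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&my_tree, index);
 *	if (item && !my_try_get_reference(item))
 *		item = NULL;
 *	rcu_read_unlock();
 */
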
static inline int slot_count(struct radix_tree_node *node,
						void **slot)
{
	int n = 1;
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	void *ptr = node_to_entry(slot);
	unsigned offset = get_slot_offset(node, slot);
	int i;

	for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) {
		if (node->slots[offset + i] != ptr)
			break;
		n++;
	}
#endif
	return n;
}

static void replace_slot(struct radix_tree_root *root,
			 struct radix_tree_node *node,
			 void **slot, void *item,
			 bool warn_typeswitch)
{
	void *old = rcu_dereference_raw(*slot);
	int count, exceptional;

	WARN_ON_ONCE(radix_tree_is_internal_node(item));

	count = !!item - !!old;
	exceptional = !!radix_tree_exceptional_entry(item) -
		      !!radix_tree_exceptional_entry(old);

	WARN_ON_ONCE(warn_typeswitch && (count || exceptional));

	if (node) {
		node->count += count;
		if (exceptional) {
			exceptional *= slot_count(node, slot);
			node->exceptional += exceptional;
		}
	}

	rcu_assign_pointer(*slot, item);
}

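/*
 * Worked example (a sketch, not from the original source): deleting a
 * regular entry passes item == NULL, so count == !!NULL - !!old == -1
 * and the node's occupancy drops by one; swapping one regular entry for
 * another gives count == 0 and exceptional == 0, which is the only
 * combination radix_tree_replace_slot() permits.
 */
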
static inline void delete_sibling_entries(struct radix_tree_node *node,
						void **slot)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	bool exceptional = radix_tree_exceptional_entry(*slot);
	void *ptr = node_to_entry(slot);
	unsigned offset = get_slot_offset(node, slot);
	int i;

	for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) {
		if (node->slots[offset + i] != ptr)
			break;
		node->slots[offset + i] = NULL;
		node->count--;
		if (exceptional)
			node->exceptional--;
	}
#endif
}

/**
 * __radix_tree_replace		- replace item in a slot
 * @root:		radix tree root
 * @node:		pointer to tree node
 * @slot:		pointer to slot in @node
 * @item:		new item to store in the slot.
 * @update_node:	callback for changing leaf nodes
 * @private:		private data to pass to @update_node
 *
 * For use with __radix_tree_lookup().  Caller must hold tree write locked
 * across slot lookup and replacement.
 */
void __radix_tree_replace(struct radix_tree_root *root,
			  struct radix_tree_node *node,
			  void **slot, void *item,
			  radix_tree_update_node_t update_node, void *private)
{
	if (!item)
		delete_sibling_entries(node, slot);
	/*
	 * This function supports replacing exceptional entries and
	 * deleting entries, but that needs accounting against the
	 * node unless the slot is root->rnode.
	 */
	replace_slot(root, node, slot, item,
		     !node && slot != (void **)&root->rnode);

	if (!node)
		return;

	if (update_node)
		update_node(node, private);

	delete_node(root, node, update_node, private);
}

/**
 * radix_tree_replace_slot	- replace item in a slot
 * @root:	radix tree root
 * @slot:	pointer to slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_lookup_slot(), radix_tree_gang_lookup_slot(),
 * radix_tree_gang_lookup_tag_slot().  Caller must hold tree write locked
 * across slot lookup and replacement.
 *
 * NOTE: This cannot be used to switch between non-entries (empty slots),
 * regular entries, and exceptional entries, as that requires accounting
 * inside the radix tree node. When switching from one type of entry or
 * deleting, use __radix_tree_lookup() and __radix_tree_replace() or
 * radix_tree_iter_replace().
 */
void radix_tree_replace_slot(struct radix_tree_root *root,
			     void **slot, void *item)
{
	replace_slot(root, NULL, slot, item, true);
}

/**
 * radix_tree_iter_replace - replace item in a slot
 * @root:	radix tree root
 * @slot:	pointer to slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_split() and radix_tree_for_each_slot().
 * Caller must hold tree write locked across split and replacement.
 */
void radix_tree_iter_replace(struct radix_tree_root *root,
		const struct radix_tree_iter *iter, void **slot, void *item)
{
	__radix_tree_replace(root, iter->node, slot, item, NULL, NULL);
}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
/**
 * radix_tree_join - replace multiple entries with one multiorder entry
 * @root: radix tree root
 * @index: an index inside the new entry
 * @order: order of the new entry
 * @item: new entry
 *
 * Call this function to replace several entries with one larger entry.
 * The existing entries are presumed to not need freeing as a result of
 * this call.
 *
 * The replacement entry will have all the tags set on it that were set
 * on any of the entries it is replacing.
 */
int radix_tree_join(struct radix_tree_root *root, unsigned long index,
			unsigned order, void *item)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	BUG_ON(radix_tree_is_internal_node(item));

	error = __radix_tree_create(root, index, order, &node, &slot);
	if (!error)
		error = insert_entries(node, slot, item, order, true);
	if (error > 0)
		error = 0;

	return error;
}
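
/*
 * Join usage sketch (not from the original source; my_tree and huge_item
 * are hypothetical): replace the order-0 entries covering indices
 * 512-1023 with a single order-9 entry:
 *
 *	if (radix_tree_join(&my_tree, 512, 9, huge_item))
 *		...handle the error...
 */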

/**
 * radix_tree_split - Split an entry into smaller entries
 * @root: radix tree root
 * @index: An index within the large entry
 * @order: Order of new entries
 *
 * Call this function as the first step in replacing a multiorder entry
 * with several entries of lower order.  After this function returns,
 * loop over the relevant portion of the tree using radix_tree_for_each_slot()
 * and call radix_tree_iter_replace() to set up each new entry.
 *
 * The tags from this entry are replicated to all the new entries.
 *
 * The radix tree should be locked against modification during the entire
 * replacement operation.  Lock-free lookups will see RADIX_TREE_RETRY which
 * should prompt RCU walkers to restart the lookup from the root.
 */
int radix_tree_split(struct radix_tree_root *root, unsigned long index,
				unsigned order)
{
	struct radix_tree_node *parent, *node, *child;
	void **slot;
	unsigned int offset, end;
	unsigned n, tag, tags = 0;

	if (!__radix_tree_lookup(root, index, &parent, &slot))
		return -ENOENT;
	if (!parent)
		return -ENOENT;

	offset = get_slot_offset(parent, slot);

	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
		if (tag_get(parent, tag, offset))
			tags |= 1 << tag;

	for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) {
		if (!is_sibling_entry(parent, parent->slots[end]))
			break;
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			if (tags & (1 << tag))
				tag_set(parent, tag, end);
		/* rcu_assign_pointer ensures tags are set before RETRY */
		rcu_assign_pointer(parent->slots[end], RADIX_TREE_RETRY);
	}
	rcu_assign_pointer(parent->slots[offset], RADIX_TREE_RETRY);
	parent->exceptional -= (end - offset);

	if (order == parent->shift)
		return 0;
	if (order > parent->shift) {
		while (offset < end)
			offset += insert_entries(parent, &parent->slots[offset],
					RADIX_TREE_RETRY, order, true);
		return 0;
	}

	node = parent;

	for (;;) {
		if (node->shift > order) {
			child = radix_tree_node_alloc(root, node,
					node->shift - RADIX_TREE_MAP_SHIFT,
					offset, 0, 0);
			if (!child)
				goto nomem;
			if (node != parent) {
				node->count++;
				node->slots[offset] = node_to_entry(child);
				for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
					if (tags & (1 << tag))
						tag_set(node, tag, offset);
			}

			node = child;
			offset = 0;
			continue;
		}

		n = insert_entries(node, &node->slots[offset],
				RADIX_TREE_RETRY, order, false);
		BUG_ON(n > RADIX_TREE_MAP_SIZE);

		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			if (tags & (1 << tag))
				tag_set(node, tag, offset);
		offset += n;

		while (offset == RADIX_TREE_MAP_SIZE) {
			if (node == parent)
				break;
			offset = node->offset;
			child = node;
			node = node->parent;
			rcu_assign_pointer(node->slots[offset],
					node_to_entry(child));
			offset++;
		}
		if ((node == parent) && (offset == end))
			return 0;
	}

nomem:
	/* Shouldn't happen; did user forget to preload? */
	/* TODO: free all the allocated nodes */
	WARN_ON(1);
	return -ENOMEM;
}
#endif

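/*
 * Split usage sketch (not from the original source; my_tree, my_lock and
 * new_item are hypothetical): break one order-9 entry at index 0 into
 * order-0 entries, filling each slot before dropping the lock:
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	radix_tree_split_preload(9, 0, GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	radix_tree_split(&my_tree, 0, 0);
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0)
 *		radix_tree_iter_replace(&my_tree, &iter, slot, new_item);
 *	spin_unlock(&my_lock);
 *	radix_tree_preload_end();
 */
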
/**
 *	radix_tree_tag_set - set a tag on a radix tree node
 *	@root:		radix tree root
 *	@index:		index key
 *	@tag:		tag index
 *
 *	Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
 *	corresponding to @index in the radix tree, from the root
 *	all the way down to the leaf node.
 *
 *	Returns the address of the tagged item.  Setting a tag on a
 *	not-present item is a bug.
 */
1276 | */ | |
1277 | void *radix_tree_tag_set(struct radix_tree_root *root, | |
daff89f3 | 1278 | unsigned long index, unsigned int tag) |
1da177e4 | 1279 | { |
fb969909 RZ |
1280 | struct radix_tree_node *node, *parent; |
1281 | unsigned long maxindex; | |
1da177e4 | 1282 | |
9e85d811 | 1283 | radix_tree_load_root(root, &node, &maxindex); |
fb969909 | 1284 | BUG_ON(index > maxindex); |
1da177e4 | 1285 | |
b194d16c | 1286 | while (radix_tree_is_internal_node(node)) { |
fb969909 | 1287 | unsigned offset; |
1da177e4 | 1288 | |
4dd6c098 | 1289 | parent = entry_to_node(node); |
9e85d811 | 1290 | offset = radix_tree_descend(parent, &node, index); |
fb969909 RZ |
1291 | BUG_ON(!node); |
1292 | ||
1293 | if (!tag_get(parent, tag, offset)) | |
1294 | tag_set(parent, tag, offset); | |
1da177e4 LT |
1295 | } |
1296 | ||
612d6c19 | 1297 | /* set the root's tag bit */ |
fb969909 | 1298 | if (!root_tag_get(root, tag)) |
612d6c19 NP |
1299 | root_tag_set(root, tag); |
1300 | ||
fb969909 | 1301 | return node; |
1da177e4 LT |
1302 | } |
1303 | EXPORT_SYMBOL(radix_tree_tag_set); | |
1304 | ||
d604c324 MW |
1305 | static void node_tag_clear(struct radix_tree_root *root, |
1306 | struct radix_tree_node *node, | |
1307 | unsigned int tag, unsigned int offset) | |
1308 | { | |
1309 | while (node) { | |
1310 | if (!tag_get(node, tag, offset)) | |
1311 | return; | |
1312 | tag_clear(node, tag, offset); | |
1313 | if (any_tag_set(node, tag)) | |
1314 | return; | |
1315 | ||
1316 | offset = node->offset; | |
1317 | node = node->parent; | |
1318 | } | |
1319 | ||
1320 | /* clear the root's tag bit */ | |
1321 | if (root_tag_get(root, tag)) | |
1322 | root_tag_clear(root, tag); | |
1323 | } | |
1324 | ||
9498d2bb MW |
1325 | static void node_tag_set(struct radix_tree_root *root, |
1326 | struct radix_tree_node *node, | |
1327 | unsigned int tag, unsigned int offset) | |
1328 | { | |
1329 | while (node) { | |
1330 | if (tag_get(node, tag, offset)) | |
1331 | return; | |
1332 | tag_set(node, tag, offset); | |
1333 | offset = node->offset; | |
1334 | node = node->parent; | |
1335 | } | |
1336 | ||
1337 | if (!root_tag_get(root, tag)) | |
1338 | root_tag_set(root, tag); | |
1339 | } | |
1340 | ||
268f42de MW |
1341 | /** |
1342 | * radix_tree_iter_tag_set - set a tag on the current iterator entry | |
1343 | * @root: radix tree root | |
1344 | * @iter: iterator state | |
1345 | * @tag: tag to set | |
1346 | */ | |
1347 | void radix_tree_iter_tag_set(struct radix_tree_root *root, | |
1348 | const struct radix_tree_iter *iter, unsigned int tag) | |
1349 | { | |
1350 | node_tag_set(root, iter->node, tag, iter_offset(iter)); | |
1351 | } | |
1352 | ||
1da177e4 LT |
1353 | /** |
1354 | * radix_tree_tag_clear - clear a tag on a radix tree node | |
1355 | * @root: radix tree root | |
1356 | * @index: index key | |
2fcd9005 | 1357 | * @tag: tag index |
1da177e4 | 1358 | * |
daff89f3 | 1359 | * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS) |
2fcd9005 MW |
1360 | * corresponding to @index in the radix tree. If this causes |
1361 | * the leaf node to have no tags set then clear the tag in the | |
1da177e4 LT |
1362 | * next-to-leaf node, etc. |
1363 | * | |
 *	Returns the address of the tagged item on success, else NULL; i.e.
 *	it has the same return value and semantics as radix_tree_lookup().
 */
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	int uninitialized_var(offset);

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	parent = NULL;

	while (radix_tree_is_internal_node(node)) {
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
	}

	if (node)
		node_tag_clear(root, parent, tag, offset);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_clear);

/**
 * radix_tree_tag_get - get a tag on a radix tree node
 * @root:		radix tree root
 * @index:		index key
 * @tag:		tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Return values:
 *
 *  0: tag not present or not set
 *  1: tag set
 *
 * Note that the return value of this function may not be relied on, even if
 * the RCU lock is held, unless tag modification and node deletion are excluded
 * from concurrency.
 */
int radix_tree_tag_get(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	if (!root_tag_get(root, tag))
		return 0;

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return 0;
	if (node == NULL)
		return 0;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);

		if (!node)
			return 0;
		if (!tag_get(parent, tag, offset))
			return 0;
		if (node == RADIX_TREE_RETRY)
			break;
	}

	return 1;
}
EXPORT_SYMBOL(radix_tree_tag_get);

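/*
 * Tag usage sketch (not from the original source; my_tree is hypothetical
 * and 0 is simply the first tag index, e.g. PAGECACHE_TAG_DIRTY for the
 * page cache):
 *
 *	radix_tree_tag_set(&my_tree, index, 0);
 *	if (radix_tree_tag_get(&my_tree, index, 0))
 *		...the item at index carries tag 0...
 *	radix_tree_tag_clear(&my_tree, index, 0);
 */
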
static inline void __set_iter_shift(struct radix_tree_iter *iter,
					unsigned int shift)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	iter->shift = shift;
#endif
}

/* Construct iter->tags bit-mask from node->tags[tag] array */
static void set_iter_tags(struct radix_tree_iter *iter,
				struct radix_tree_node *node, unsigned offset,
				unsigned tag)
{
	unsigned tag_long = offset / BITS_PER_LONG;
	unsigned tag_bit = offset % BITS_PER_LONG;

	iter->tags = node->tags[tag][tag_long] >> tag_bit;

	/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
	if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
		/* Pick tags from next element */
		if (tag_bit)
			iter->tags |= node->tags[tag][tag_long + 1] <<
						(BITS_PER_LONG - tag_bit);
		/* Clip chunk size, here only BITS_PER_LONG tags */
		iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
	}
}

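/*
 * Worked example of the splice above (assuming 32-bit longs, so that
 * RADIX_TREE_TAG_LONGS > 1): for offset == 8, tag_long == 0 and
 * tag_bit == 8, so iter->tags starts with bits 8..31 of tags[tag][0]
 * in its low 24 bits, and bits 0..7 of tags[tag][1] are spliced into
 * the top 8 bits; next_index is then clipped to BITS_PER_LONG slots
 * ahead.  Bit 0 of iter->tags always corresponds to the slot at the
 * iterator's current index.
 */
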
#ifdef CONFIG_RADIX_TREE_MULTIORDER
static void **skip_siblings(struct radix_tree_node **nodep,
			void **slot, struct radix_tree_iter *iter)
{
	void *sib = node_to_entry(slot - 1);

	while (iter->index < iter->next_index) {
		*nodep = rcu_dereference_raw(*slot);
		if (*nodep && *nodep != sib)
			return slot;
		slot++;
		iter->index = __radix_tree_iter_add(iter, 1);
		iter->tags >>= 1;
	}

	*nodep = NULL;
	return NULL;
}

void **__radix_tree_next_slot(void **slot, struct radix_tree_iter *iter,
					unsigned flags)
{
	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *node = rcu_dereference_raw(*slot);

	slot = skip_siblings(&node, slot, iter);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;
		unsigned long next_index;

		if (node == RADIX_TREE_RETRY)
			return slot;
		node = entry_to_node(node);
		iter->node = node;
		iter->shift = node->shift;

		if (flags & RADIX_TREE_ITER_TAGGED) {
			offset = radix_tree_find_next_bit(node, tag, 0);
			if (offset == RADIX_TREE_MAP_SIZE)
				return NULL;
			slot = &node->slots[offset];
			iter->index = __radix_tree_iter_add(iter, offset);
			set_iter_tags(iter, node, offset, tag);
			node = rcu_dereference_raw(*slot);
		} else {
			offset = 0;
			slot = &node->slots[0];
			for (;;) {
				node = rcu_dereference_raw(*slot);
				if (node)
					break;
				slot++;
				offset++;
				if (offset == RADIX_TREE_MAP_SIZE)
					return NULL;
			}
			iter->index = __radix_tree_iter_add(iter, offset);
		}
		if ((flags & RADIX_TREE_ITER_CONTIG) && (offset > 0))
			goto none;
		next_index = (iter->index | shift_maxindex(iter->shift)) + 1;
		if (next_index < iter->next_index)
			iter->next_index = next_index;
	}

	return slot;
none:
	iter->next_index = 0;
	return NULL;
}
EXPORT_SYMBOL(__radix_tree_next_slot);
#else
static void **skip_siblings(struct radix_tree_node **nodep,
			void **slot, struct radix_tree_iter *iter)
{
	return slot;
}
#endif

void **radix_tree_iter_resume(void **slot, struct radix_tree_iter *iter)
{
	struct radix_tree_node *node;

	slot++;
	iter->index = __radix_tree_iter_add(iter, 1);
	node = rcu_dereference_raw(*slot);
	skip_siblings(&node, slot, iter);
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}
EXPORT_SYMBOL(radix_tree_iter_resume);

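/*
 * Example (illustrative sketch): radix_tree_iter_resume() lets a caller
 * drop its lock in the middle of an iteration and pick up where it left
 * off; the next loop pass refetches a fresh chunk.  "my_lock" is a
 * hypothetical caller-side lock:
 *
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		process(rcu_dereference_raw(*slot));	(hypothetical)
 *		if (need_resched()) {
 *			slot = radix_tree_iter_resume(slot, &iter);
 *			spin_unlock(&my_lock);
 *			cond_resched();
 *			spin_lock(&my_lock);
 *		}
 *	}
 */
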
/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if iteration is over
 */
void **radix_tree_next_chunk(struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags)
{
	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *node, *child;
	unsigned long index, offset, maxindex;

	if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
		return NULL;

	/*
	 * Catch next_index overflow after ~0UL.  iter->index never overflows
	 * during iterating; it can be zero only at the beginning.
	 * And we cannot overflow iter->next_index in a single step,
	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
	 *
	 * This condition is also used by radix_tree_next_slot() to stop
	 * contiguous iterating, and to forbid switching to the next chunk.
	 */
	index = iter->next_index;
	if (!index && iter->index)
		return NULL;

restart:
	radix_tree_load_root(root, &child, &maxindex);
	if (index > maxindex)
		return NULL;
	if (!child)
		return NULL;

	if (!radix_tree_is_internal_node(child)) {
		/* Single-slot tree */
		iter->index = index;
		iter->next_index = maxindex + 1;
		iter->tags = 1;
		iter->node = NULL;
		__set_iter_shift(iter, 0);
		return (void **)&root->rnode;
	}

	do {
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);

		if ((flags & RADIX_TREE_ITER_TAGGED) ?
				!tag_get(node, tag, offset) : !child) {
			/* Hole detected */
			if (flags & RADIX_TREE_ITER_CONTIG)
				return NULL;

			if (flags & RADIX_TREE_ITER_TAGGED)
				offset = radix_tree_find_next_bit(node, tag,
						offset + 1);
			else
				while (++offset < RADIX_TREE_MAP_SIZE) {
					void *slot = node->slots[offset];
					if (is_sibling_entry(node, slot))
						continue;
					if (slot)
						break;
				}
			index &= ~node_maxindex(node);
			index += offset << node->shift;
			/* Overflow after ~0UL */
			if (!index)
				return NULL;
			if (offset == RADIX_TREE_MAP_SIZE)
				goto restart;
			child = rcu_dereference_raw(node->slots[offset]);
		}

		if (!child)
			goto restart;
		if (child == RADIX_TREE_RETRY)
			break;
	} while (radix_tree_is_internal_node(child));

	/* Update the iterator state */
	iter->index = (index & ~node_maxindex(node)) | (offset << node->shift);
	iter->next_index = (index | node_maxindex(node)) + 1;
	iter->node = node;
	__set_iter_shift(iter, node->shift);

	if (flags & RADIX_TREE_ITER_TAGGED)
		set_iter_tags(iter, node, offset, tag);

	return node->slots + offset;
}
EXPORT_SYMBOL(radix_tree_next_chunk);

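/*
 * Example (illustrative sketch): radix_tree_next_chunk() is normally
 * driven by the iteration macros rather than called directly.  A manual
 * walk over present entries, equivalent in spirit to what
 * radix_tree_for_each_slot() expands to, would look roughly like:
 *
 *	struct radix_tree_iter iter;
 *	void **slot = radix_tree_iter_init(&iter, 0);
 *
 *	while (slot || (slot = radix_tree_next_chunk(&my_tree, &iter, 0))) {
 *		use_entry(rcu_dereference_raw(*slot));	(hypothetical)
 *		slot = radix_tree_next_slot(slot, &iter, 0);
 *	}
 */
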
/**
 * radix_tree_gang_lookup - perform multiple lookup on a radix tree
 * @root:	radix tree root
 * @results:	where the results of the lookup are placed
 * @first_index:	start the lookup from this key
 * @max_items:	place up to this many items at *results
 *
 * Performs an index-ascending scan of the tree for present items.  Places
 * them at *@results and returns the number of items which were placed at
 * *@results.
 *
 * The implementation is naive.
 *
 * Like radix_tree_lookup, radix_tree_gang_lookup may be called under
 * rcu_read_lock.  In this case, rather than the returned results being
 * an atomic snapshot of the tree at a single point in time, the
 * semantics of an RCU protected gang lookup are as though multiple
 * radix_tree_lookups have been issued under individual locks, with the
 * results stored in 'results'.
 */
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup);

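/*
 * Example (illustrative sketch): batched lookup of up to 16 entries
 * starting at index 0, under RCU protection:
 *
 *	void *batch[16];
 *	unsigned int i, nr;
 *
 *	rcu_read_lock();
 *	nr = radix_tree_gang_lookup(&my_tree, batch, 0, ARRAY_SIZE(batch));
 *	for (i = 0; i < nr; i++)
 *		use_entry(batch[i]);	(hypothetical consumer)
 *	rcu_read_unlock();
 */
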
/**
 * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
 * @root:	radix tree root
 * @results:	where the results of the lookup are placed
 * @indices:	where their indices should be placed (but usually NULL)
 * @first_index:	start the lookup from this key
 * @max_items:	place up to this many items at *results
 *
 * Performs an index-ascending scan of the tree for present items.  Places
 * their slots at *@results and returns the number of items which were
 * placed at *@results.
 *
 * The implementation is naive.
 *
 * Like radix_tree_gang_lookup as far as RCU and locking goes.  Slots must
 * be dereferenced with radix_tree_deref_slot, and if using only RCU
 * protection, radix_tree_deref_slot may fail requiring a retry.
 */
unsigned int
radix_tree_gang_lookup_slot(struct radix_tree_root *root,
			void ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = slot;
		if (indices)
			indices[ret] = iter.index;
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_slot);

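/*
 * Example (illustrative sketch): dereferencing the returned slots under
 * RCU only, retrying if a concurrent tree restructuring is observed:
 *
 *	void **slots[8];
 *	unsigned int i, nr;
 *
 *	rcu_read_lock();
 *	nr = radix_tree_gang_lookup_slot(&my_tree, slots, NULL, 0, 8);
 *	for (i = 0; i < nr; i++) {
 *		void *entry = radix_tree_deref_slot(slots[i]);
 *		if (radix_tree_deref_retry(entry))
 *			continue;	(or restart the whole lookup)
 *		use_entry(entry);	(hypothetical consumer)
 *	}
 *	rcu_read_unlock();
 */
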
/**
 * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
 *                              based on a tag
 * @root:	radix tree root
 * @results:	where the results of the lookup are placed
 * @first_index:	start the lookup from this key
 * @max_items:	place up to this many items at *results
 * @tag:	the tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Performs an index-ascending scan of the tree for present items which
 * have the tag indexed by @tag set.  Places the items at *@results and
 * returns the number of items which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);

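/*
 * Example (illustrative sketch): a writeback-style scan that pulls out
 * one batch of entries carrying a "dirty" tag.  MY_DIRTY_TAG and the
 * batch size are hypothetical caller-side choices:
 *
 *	void *batch[16];
 *	unsigned int i, nr;
 *
 *	rcu_read_lock();
 *	nr = radix_tree_gang_lookup_tag(&my_tree, batch, 0,
 *					ARRAY_SIZE(batch), MY_DIRTY_TAG);
 *	for (i = 0; i < nr; i++)
 *		write_one(batch[i]);	(hypothetical)
 *	rcu_read_unlock();
 */
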
/**
 * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
 *					radix tree based on a tag
 * @root:	radix tree root
 * @results:	where the results of the lookup are placed
 * @first_index:	start the lookup from this key
 * @max_items:	place up to this many items at *results
 * @tag:	the tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Performs an index-ascending scan of the tree for present items which
 * have the tag indexed by @tag set.  Places the slots at *@results and
 * returns the number of slots which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = slot;
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);

/**
 * __radix_tree_delete_node - try to free node after clearing a slot
 * @root:	radix tree root
 * @node:	node containing @index
 * @update_node:	callback for changing leaf nodes
 * @private:	private data to pass to @update_node
 *
 * After clearing the slot at @index in @node from radix tree
 * rooted at @root, call this function to attempt freeing the
 * node and shrinking the tree.
 */
void __radix_tree_delete_node(struct radix_tree_root *root,
			      struct radix_tree_node *node,
			      radix_tree_update_node_t update_node,
			      void *private)
{
	delete_node(root, node, update_node, private);
}

/**
 * radix_tree_delete_item - delete an item from a radix tree
 * @root:	radix tree root
 * @index:	index key
 * @item:	expected item
 *
 * Remove @item at @index from the radix tree rooted at @root.
 *
 * Returns the address of the deleted item, or NULL if it was not present
 * or the entry at the given @index was not @item.
 */
void *radix_tree_delete_item(struct radix_tree_root *root,
			     unsigned long index, void *item)
{
	struct radix_tree_node *node;
	unsigned int offset;
	void **slot;
	void *entry;
	int tag;

	entry = __radix_tree_lookup(root, index, &node, &slot);
	if (!entry)
		return NULL;

	if (item && entry != item)
		return NULL;

	if (!node) {
		root_tag_clear_all(root);
		root->rnode = NULL;
		return entry;
	}

	offset = get_slot_offset(node, slot);

	/* Clear all tags associated with the item to be deleted. */
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
		node_tag_clear(root, node, tag, offset);

	__radix_tree_replace(root, node, slot, NULL, NULL, NULL);

	return entry;
}
EXPORT_SYMBOL(radix_tree_delete_item);

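/*
 * Example (illustrative sketch): conditional delete, removing the entry
 * only if it still matches what the caller expects.  Useful when the
 * entry may have been replaced since it was looked up:
 *
 *	spin_lock(&my_lock);
 *	old = radix_tree_delete_item(&my_tree, index, expected);
 *	spin_unlock(&my_lock);
 *	if (old)
 *		free_entry(old);	(hypothetical; deletion succeeded)
 *	(old == NULL means nothing at @index, or it was not @expected)
 */
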
/**
 * radix_tree_delete - delete an item from a radix tree
 * @root:	radix tree root
 * @index:	index key
 *
 * Remove the item at @index from the radix tree rooted at @root.
 *
 * Returns the address of the deleted item, or NULL if it was not present.
 */
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
	return radix_tree_delete_item(root, index, NULL);
}
EXPORT_SYMBOL(radix_tree_delete);

void radix_tree_clear_tags(struct radix_tree_root *root,
			   struct radix_tree_node *node,
			   void **slot)
{
	if (node) {
		unsigned int tag, offset = get_slot_offset(node, slot);
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			node_tag_clear(root, node, tag, offset);
	} else {
		/* Clear root node tags */
		root->gfp_mask &= __GFP_BITS_MASK;
	}
}

/**
 * radix_tree_tagged - test whether any items in the tree are tagged
 * @root:	radix tree root
 * @tag:	tag to test
 */
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
{
	return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);

static void
radix_tree_node_ctor(void *arg)
{
	struct radix_tree_node *node = arg;

	memset(node, 0, sizeof(*node));
	INIT_LIST_HEAD(&node->private_list);
}

static __init unsigned long __maxindex(unsigned int height)
{
	unsigned int width = height * RADIX_TREE_MAP_SHIFT;
	int shift = RADIX_TREE_INDEX_BITS - width;

	if (shift < 0)
		return ~0UL;
	if (shift >= BITS_PER_LONG)
		return 0UL;
	return ~0UL >> shift;
}

static __init void radix_tree_init_maxnodes(void)
{
	unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1];
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++)
		height_to_maxindex[i] = __maxindex(i);
	for (i = 0; i < ARRAY_SIZE(height_to_maxnodes); i++) {
		for (j = i; j > 0; j--)
			height_to_maxnodes[i] += height_to_maxindex[j - 1] + 1;
	}
}

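/*
 * Worked example of the table above (assuming 64-bit longs and
 * RADIX_TREE_MAP_SHIFT == 6, i.e. 64 slots per node): __maxindex(1) is 63,
 * so a height-1 tree covers indices 0..63 with a single node and
 * height_to_maxnodes[1] == 1; a fully populated height-2 tree covers
 * indices 0..4095 with one root node plus 64 children, so
 * height_to_maxnodes[2] == 65.
 */
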
static int radix_tree_cpu_dead(unsigned int cpu)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;

	/* Free per-cpu pool of preloaded nodes */
	rtp = &per_cpu(radix_tree_preloads, cpu);
	while (rtp->nr) {
		node = rtp->nodes;
		rtp->nodes = node->private_data;
		kmem_cache_free(radix_tree_node_cachep, node);
		rtp->nr--;
	}
	return 0;
}

void __init radix_tree_init(void)
{
	int ret;

	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
			sizeof(struct radix_tree_node), 0,
			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
			radix_tree_node_ctor);
	radix_tree_init_maxnodes();
	ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
					NULL, radix_tree_cpu_dead);
	WARN_ON(ret < 0);
}