1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | |
3 | * XArray implementation | |
4 | * Copyright (c) 2017 Microsoft Corporation | |
5 | * Author: Matthew Wilcox <[email protected]> | |
6 | */ | |
7 | ||
8 | #include <linux/bitmap.h> |
9 | #include <linux/export.h> |
10 | #include <linux/list.h> |
11 | #include <linux/slab.h> | |
12 | #include <linux/xarray.h> |
13 | ||
14 | /* | |
15 | * Coding conventions in this file: | |
16 | * | |
17 | * @xa is used to refer to the entire xarray. | |
18 | * @xas is the 'xarray operation state'. It may be either a pointer to | |
19 | * an xa_state, or an xa_state stored on the stack. This is an unfortunate | |
20 | * ambiguity. | |
21 | * @index is the index of the entry being operated on | |
22 | * @mark is an xa_mark_t; a small number indicating one of the mark bits. | |
23 | * @node refers to an xa_node; usually the primary one being operated on by | |
24 | * this function. | |
25 | * @offset is the index into the slots array inside an xa_node. | |
26 | * @parent refers to the @xa_node closer to the head than @node. | |
27 | * @entry refers to something stored in a slot in the xarray | |
28 | */ | |
29 | ||
30 | static inline unsigned int xa_lock_type(const struct xarray *xa) |
31 | { | |
32 | return (__force unsigned int)xa->xa_flags & 3; | |
33 | } | |
34 | ||
35 | static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type) | |
36 | { | |
37 | if (lock_type == XA_LOCK_IRQ) | |
38 | xas_lock_irq(xas); | |
39 | else if (lock_type == XA_LOCK_BH) | |
40 | xas_lock_bh(xas); | |
41 | else | |
42 | xas_lock(xas); | |
43 | } | |
44 | ||
45 | static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type) | |
46 | { | |
47 | if (lock_type == XA_LOCK_IRQ) | |
48 | xas_unlock_irq(xas); | |
49 | else if (lock_type == XA_LOCK_BH) | |
50 | xas_unlock_bh(xas); | |
51 | else | |
52 | xas_unlock(xas); | |
53 | } | |
54 | ||
55 | static inline bool xa_track_free(const struct xarray *xa) |
56 | { | |
57 | return xa->xa_flags & XA_FLAGS_TRACK_FREE; | |
58 | } | |
59 | ||
60 | static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark) |
61 | { | |
62 | if (!(xa->xa_flags & XA_FLAGS_MARK(mark))) | |
63 | xa->xa_flags |= XA_FLAGS_MARK(mark); | |
64 | } | |
65 | ||
66 | static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark) | |
67 | { | |
68 | if (xa->xa_flags & XA_FLAGS_MARK(mark)) | |
69 | xa->xa_flags &= ~(XA_FLAGS_MARK(mark)); | |
70 | } | |
71 | ||
72 | static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark) | |
73 | { | |
74 | return node->marks[(__force unsigned)mark]; | |
75 | } | |
76 | ||
77 | static inline bool node_get_mark(struct xa_node *node, | |
78 | unsigned int offset, xa_mark_t mark) | |
79 | { | |
80 | return test_bit(offset, node_marks(node, mark)); | |
81 | } | |
82 | ||
83 | /* returns true if the bit was set */ | |
84 | static inline bool node_set_mark(struct xa_node *node, unsigned int offset, | |
85 | xa_mark_t mark) | |
86 | { | |
87 | return __test_and_set_bit(offset, node_marks(node, mark)); | |
88 | } | |
89 | ||
90 | /* returns true if the bit was set */ | |
91 | static inline bool node_clear_mark(struct xa_node *node, unsigned int offset, | |
92 | xa_mark_t mark) | |
93 | { | |
94 | return __test_and_clear_bit(offset, node_marks(node, mark)); | |
95 | } | |
96 | ||
97 | static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark) | |
98 | { | |
99 | return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE); | |
100 | } | |
101 | ||
102 | static inline void node_mark_all(struct xa_node *node, xa_mark_t mark) |
103 | { | |
104 | bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE); | |
105 | } | |
106 | ||
107 | #define mark_inc(mark) do { \ |
108 | mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \ | |
109 | } while (0) | |
110 | ||
111 | /* | |
112 | * xas_squash_marks() - Merge all marks to the first entry | |
113 | * @xas: Array operation state. | |
114 | * | |
115 | * Set a mark on the first entry if any entry has it set. Clear marks on | |
116 | * all sibling entries. | |
117 | */ | |
118 | static void xas_squash_marks(const struct xa_state *xas) | |
119 | { | |
120 | unsigned int mark = 0; | |
121 | unsigned int limit = xas->xa_offset + xas->xa_sibs + 1; | |
122 | ||
123 | if (!xas->xa_sibs) | |
124 | return; | |
125 | ||
126 | do { | |
127 | unsigned long *marks = xas->xa_node->marks[mark]; | |
128 | if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit) | |
129 | continue; | |
130 | __set_bit(xas->xa_offset, marks); | |
131 | bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs); | |
132 | } while (mark++ != (__force unsigned)XA_MARK_MAX); | |
133 | } | |
134 | ||
135 | /* extracts the offset within this node from the index */ |
136 | static unsigned int get_offset(unsigned long index, struct xa_node *node) | |
137 | { | |
138 | return (index >> node->shift) & XA_CHUNK_MASK; | |
139 | } | |
140 | ||
141 | static void xas_set_offset(struct xa_state *xas) |
142 | { | |
143 | xas->xa_offset = get_offset(xas->xa_index, xas->xa_node); | |
144 | } | |
145 | ||
146 | /* move the index either forwards (find) or backwards (sibling slot) */ |
147 | static void xas_move_index(struct xa_state *xas, unsigned long offset) | |
148 | { | |
149 | unsigned int shift = xas->xa_node->shift; | |
150 | xas->xa_index &= ~XA_CHUNK_MASK << shift; | |
151 | xas->xa_index += offset << shift; | |
152 | } | |
153 | ||
154 | static void xas_advance(struct xa_state *xas) |
155 | { | |
156 | xas->xa_offset++; | |
157 | xas_move_index(xas, xas->xa_offset); | |
158 | } | |
159 | ||
160 | static void *set_bounds(struct xa_state *xas) |
161 | { | |
162 | xas->xa_node = XAS_BOUNDS; | |
163 | return NULL; | |
164 | } | |
165 | ||
166 | /* | |
167 | * Starts a walk. If the @xas is already valid, we assume that it's on | |
168 | * the right path and just return where we've got to. If we're in an | |
169 | * error state, return NULL. If the index is outside the current scope | |
170 | * of the xarray, return NULL without changing @xas->xa_node. Otherwise | |
171 | * set @xas->xa_node to NULL and return the current head of the array. | |
172 | */ | |
173 | static void *xas_start(struct xa_state *xas) | |
174 | { | |
175 | void *entry; | |
176 | ||
177 | if (xas_valid(xas)) | |
178 | return xas_reload(xas); | |
179 | if (xas_error(xas)) | |
180 | return NULL; | |
181 | ||
182 | entry = xa_head(xas->xa); | |
183 | if (!xa_is_node(entry)) { | |
184 | if (xas->xa_index) | |
185 | return set_bounds(xas); | |
186 | } else { | |
187 | if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK) | |
188 | return set_bounds(xas); | |
189 | } | |
190 | ||
191 | xas->xa_node = NULL; | |
192 | return entry; | |
193 | } | |
194 | ||
195 | static void *xas_descend(struct xa_state *xas, struct xa_node *node) | |
196 | { | |
197 | unsigned int offset = get_offset(xas->xa_index, node); | |
198 | void *entry = xa_entry(xas->xa, node, offset); | |
199 | ||
200 | xas->xa_node = node; | |
201 | if (xa_is_sibling(entry)) { | |
202 | offset = xa_to_sibling(entry); | |
203 | entry = xa_entry(xas->xa, node, offset); | |
204 | } | |
205 | ||
206 | xas->xa_offset = offset; | |
207 | return entry; | |
208 | } | |
209 | ||
210 | /** | |
211 | * xas_load() - Load an entry from the XArray (advanced). | |
212 | * @xas: XArray operation state. | |
213 | * | |
214 | * Usually walks the @xas to the appropriate state to load the entry | |
215 | * stored at xa_index. However, it will do nothing and return %NULL if | |
216 | * @xas is in an error state. xas_load() will never expand the tree. | |
217 | * | |
218 | * If the xa_state is set up to operate on a multi-index entry, xas_load() | |
219 | * may return %NULL or an internal entry, even if there are entries | |
220 | * present within the range specified by @xas. | |
221 | * | |
222 | * Context: Any context. The caller should hold the xa_lock or the RCU lock. | |
223 | * Return: Usually an entry in the XArray, but see description for exceptions. | |
224 | */ | |
225 | void *xas_load(struct xa_state *xas) | |
226 | { | |
227 | void *entry = xas_start(xas); | |
228 | ||
229 | while (xa_is_node(entry)) { | |
230 | struct xa_node *node = xa_to_node(entry); | |
231 | ||
232 | if (xas->xa_shift > node->shift) | |
233 | break; | |
234 | entry = xas_descend(xas, node); | |
235 | } | |
236 | return entry; | |
237 | } | |
238 | EXPORT_SYMBOL_GPL(xas_load); | |
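/*
 * Example caller (an illustrative sketch, not part of this file): a lockless
 * lookup built on xas_load().  xas_retry() restarts the walk if it finds a
 * retry entry left behind by a concurrent node deletion; this is the same
 * pattern xa_load() uses further down.
 *
 *	XA_STATE(xas, my_xa, index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	do {
 *		entry = xas_load(&xas);
 *	} while (xas_retry(&xas, entry));
 *	rcu_read_unlock();
 *
 * "my_xa" and "index" are assumed to be supplied by the caller.
 */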
239 | ||
240 | /* Move the radix tree node cache here */ |
241 | extern struct kmem_cache *radix_tree_node_cachep; | |
242 | extern void radix_tree_node_rcu_free(struct rcu_head *head); | |
243 | ||
244 | #define XA_RCU_FREE ((struct xarray *)1) | |
245 | ||
246 | static void xa_node_free(struct xa_node *node) | |
247 | { | |
248 | XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); | |
249 | node->array = XA_RCU_FREE; | |
250 | call_rcu(&node->rcu_head, radix_tree_node_rcu_free); | |
251 | } | |
252 | ||
253 | /* | |
254 | * xas_destroy() - Free any resources allocated during the XArray operation. | |
255 | * @xas: XArray operation state. | |
256 | * | |
257 | * This function is now internal-only. | |
258 | */ | |
259 | static void xas_destroy(struct xa_state *xas) | |
260 | { | |
261 | struct xa_node *node = xas->xa_alloc; | |
262 | ||
263 | if (!node) | |
264 | return; | |
265 | XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); | |
266 | kmem_cache_free(radix_tree_node_cachep, node); | |
267 | xas->xa_alloc = NULL; | |
268 | } | |
269 | ||
270 | /** | |
271 | * xas_nomem() - Allocate memory if needed. | |
272 | * @xas: XArray operation state. | |
273 | * @gfp: Memory allocation flags. | |
274 | * | |
275 | * If we need to add new nodes to the XArray, we try to allocate memory | |
276 | * with GFP_NOWAIT while holding the lock, which will usually succeed. | |
277 | * If it fails, @xas is flagged as needing memory to continue. The caller | |
278 | * should drop the lock and call xas_nomem(). If xas_nomem() succeeds, | |
279 | * the caller should retry the operation. | |
280 | * | |
281 | * Forward progress is guaranteed as one node is allocated here and | |
282 | * stored in the xa_state where it will be found by xas_alloc(). More | |
283 | * nodes will likely be found in the slab allocator, but we do not tie | |
284 | * them up here. | |
285 | * | |
286 | * Return: true if memory was needed, and was successfully allocated. | |
287 | */ | |
288 | bool xas_nomem(struct xa_state *xas, gfp_t gfp) | |
289 | { | |
290 | if (xas->xa_node != XA_ERROR(-ENOMEM)) { | |
291 | xas_destroy(xas); | |
292 | return false; | |
293 | } | |
294 | xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); | |
295 | if (!xas->xa_alloc) | |
296 | return false; | |
297 | XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list)); | |
298 | xas->xa_node = XAS_RESTART; | |
299 | return true; | |
300 | } | |
301 | EXPORT_SYMBOL_GPL(xas_nomem); | |
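/*
 * The retry pattern described above, as an illustrative sketch (not part of
 * this file).  The store is attempted under the lock; if it fails for lack
 * of memory, the lock is dropped, xas_nomem() allocates a node with the
 * caller's gfp flags, and the loop retries.
 *
 *	XA_STATE(xas, my_xa, index);
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, item);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 *
 * "my_xa", "index" and "item" are assumptions supplied for the example.
 */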
302 | ||
303 | /* | |
304 | * __xas_nomem() - Drop locks and allocate memory if needed. | |
305 | * @xas: XArray operation state. | |
306 | * @gfp: Memory allocation flags. | |
307 | * | |
308 | * Internal variant of xas_nomem(). | |
309 | * | |
310 | * Return: true if memory was needed, and was successfully allocated. | |
311 | */ | |
312 | static bool __xas_nomem(struct xa_state *xas, gfp_t gfp) | |
313 | __must_hold(xas->xa->xa_lock) | |
314 | { | |
315 | unsigned int lock_type = xa_lock_type(xas->xa); | |
316 | ||
317 | if (xas->xa_node != XA_ERROR(-ENOMEM)) { | |
318 | xas_destroy(xas); | |
319 | return false; | |
320 | } | |
321 | if (gfpflags_allow_blocking(gfp)) { | |
322 | xas_unlock_type(xas, lock_type); | |
323 | xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); | |
324 | xas_lock_type(xas, lock_type); | |
325 | } else { | |
326 | xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); | |
327 | } | |
328 | if (!xas->xa_alloc) | |
329 | return false; | |
330 | XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list)); | |
331 | xas->xa_node = XAS_RESTART; | |
332 | return true; | |
333 | } | |
334 | ||
335 | static void xas_update(struct xa_state *xas, struct xa_node *node) | |
336 | { | |
337 | if (xas->xa_update) | |
338 | xas->xa_update(node); | |
339 | else | |
340 | XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); | |
341 | } | |
342 | ||
343 | static void *xas_alloc(struct xa_state *xas, unsigned int shift) | |
344 | { | |
345 | struct xa_node *parent = xas->xa_node; | |
346 | struct xa_node *node = xas->xa_alloc; | |
347 | ||
348 | if (xas_invalid(xas)) | |
349 | return NULL; | |
350 | ||
351 | if (node) { | |
352 | xas->xa_alloc = NULL; | |
353 | } else { | |
354 | node = kmem_cache_alloc(radix_tree_node_cachep, | |
355 | GFP_NOWAIT | __GFP_NOWARN); | |
356 | if (!node) { | |
357 | xas_set_err(xas, -ENOMEM); | |
358 | return NULL; | |
359 | } | |
360 | } | |
361 | ||
362 | if (parent) { | |
363 | node->offset = xas->xa_offset; | |
364 | parent->count++; | |
365 | XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE); | |
366 | xas_update(xas, parent); | |
367 | } | |
368 | XA_NODE_BUG_ON(node, shift > BITS_PER_LONG); | |
369 | XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); | |
370 | node->shift = shift; | |
371 | node->count = 0; | |
372 | node->nr_values = 0; | |
373 | RCU_INIT_POINTER(node->parent, xas->xa_node); | |
374 | node->array = xas->xa; | |
375 | ||
376 | return node; | |
377 | } | |
378 | ||
379 | #ifdef CONFIG_XARRAY_MULTI |
380 | /* Returns the number of indices covered by a given xa_state */ | |
381 | static unsigned long xas_size(const struct xa_state *xas) | |
382 | { | |
383 | return (xas->xa_sibs + 1UL) << xas->xa_shift; | |
384 | } | |
385 | #endif | |
386 | ||
387 | /* |
388 | * Use this to calculate the maximum index that will need to be created | |
389 | * in order to add the entry described by @xas. Because we cannot store a | |
390 | * multiple-index entry at index 0, the calculation is a little more complex | |
391 | * than you might expect. | |
392 | */ | |
393 | static unsigned long xas_max(struct xa_state *xas) | |
394 | { | |
395 | unsigned long max = xas->xa_index; | |
396 | ||
397 | #ifdef CONFIG_XARRAY_MULTI | |
398 | if (xas->xa_shift || xas->xa_sibs) { | |
399 | unsigned long mask = xas_size(xas) - 1; |
400 | max |= mask; |
401 | if (mask == max) | |
402 | max++; | |
403 | } | |
404 | #endif | |
405 | ||
406 | return max; | |
407 | } | |
408 | ||
409 | /* The maximum index that can be contained in the array without expanding it */ | |
410 | static unsigned long max_index(void *entry) | |
411 | { | |
412 | if (!xa_is_node(entry)) | |
413 | return 0; | |
414 | return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1; | |
415 | } | |
416 | ||
417 | static void xas_shrink(struct xa_state *xas) | |
418 | { | |
419 | struct xarray *xa = xas->xa; | |
420 | struct xa_node *node = xas->xa_node; | |
421 | ||
422 | for (;;) { | |
423 | void *entry; | |
424 | ||
425 | XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); | |
426 | if (node->count != 1) | |
427 | break; | |
428 | entry = xa_entry_locked(xa, node, 0); | |
429 | if (!entry) | |
430 | break; | |
431 | if (!xa_is_node(entry) && node->shift) | |
432 | break; | |
433 | xas->xa_node = XAS_BOUNDS; | |
434 | ||
435 | RCU_INIT_POINTER(xa->xa_head, entry); | |
436 | if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK)) |
437 | xa_mark_clear(xa, XA_FREE_MARK); | |
438 | |
439 | node->count = 0; | |
440 | node->nr_values = 0; | |
441 | if (!xa_is_node(entry)) | |
442 | RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY); | |
443 | xas_update(xas, node); | |
444 | xa_node_free(node); | |
445 | if (!xa_is_node(entry)) | |
446 | break; | |
447 | node = xa_to_node(entry); | |
448 | node->parent = NULL; | |
449 | } | |
450 | } | |
451 | ||
452 | /* | |
453 | * xas_delete_node() - Attempt to delete an xa_node | |
454 | * @xas: Array operation state. | |
455 | * | |
456 | * Attempts to delete the @xas->xa_node. This will fail if xa->node has | |
457 | * a non-zero reference count. | |
458 | */ | |
459 | static void xas_delete_node(struct xa_state *xas) | |
460 | { | |
461 | struct xa_node *node = xas->xa_node; | |
462 | ||
463 | for (;;) { | |
464 | struct xa_node *parent; | |
465 | ||
466 | XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); | |
467 | if (node->count) | |
468 | break; | |
469 | ||
470 | parent = xa_parent_locked(xas->xa, node); | |
471 | xas->xa_node = parent; | |
472 | xas->xa_offset = node->offset; | |
473 | xa_node_free(node); | |
474 | ||
475 | if (!parent) { | |
476 | xas->xa->xa_head = NULL; | |
477 | xas->xa_node = XAS_BOUNDS; | |
478 | return; | |
479 | } | |
480 | ||
481 | parent->slots[xas->xa_offset] = NULL; | |
482 | parent->count--; | |
483 | XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE); | |
484 | node = parent; | |
485 | xas_update(xas, node); | |
486 | } | |
487 | ||
488 | if (!node->parent) | |
489 | xas_shrink(xas); | |
490 | } | |
491 | ||
492 | /** | |
493 | * xas_free_nodes() - Free this node and all nodes that it references | |
494 | * @xas: Array operation state. | |
495 | * @top: Node to free | |
496 | * | |
497 | * This node has been removed from the tree. We must now free it and all | |
498 | * of its subnodes. There may be RCU walkers with references into the tree, | |
499 | * so we must replace all entries with retry markers. | |
500 | */ | |
501 | static void xas_free_nodes(struct xa_state *xas, struct xa_node *top) | |
502 | { | |
503 | unsigned int offset = 0; | |
504 | struct xa_node *node = top; | |
505 | ||
506 | for (;;) { | |
507 | void *entry = xa_entry_locked(xas->xa, node, offset); | |
508 | ||
509 | if (xa_is_node(entry)) { | |
510 | node = xa_to_node(entry); | |
511 | offset = 0; | |
512 | continue; | |
513 | } | |
514 | if (entry) | |
515 | RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY); | |
516 | offset++; | |
517 | while (offset == XA_CHUNK_SIZE) { | |
518 | struct xa_node *parent; | |
519 | ||
520 | parent = xa_parent_locked(xas->xa, node); | |
521 | offset = node->offset + 1; | |
522 | node->count = 0; | |
523 | node->nr_values = 0; | |
524 | xas_update(xas, node); | |
525 | xa_node_free(node); | |
526 | if (node == top) | |
527 | return; | |
528 | node = parent; | |
529 | } | |
530 | } | |
531 | } | |
532 | ||
533 | /* | |
534 | * xas_expand adds nodes to the head of the tree until it has reached | |
535 | * sufficient height to be able to contain @xas->xa_index | |
536 | */ | |
537 | static int xas_expand(struct xa_state *xas, void *head) | |
538 | { | |
539 | struct xarray *xa = xas->xa; | |
540 | struct xa_node *node = NULL; | |
541 | unsigned int shift = 0; | |
542 | unsigned long max = xas_max(xas); | |
543 | ||
544 | if (!head) { | |
545 | if (max == 0) | |
546 | return 0; | |
547 | while ((max >> shift) >= XA_CHUNK_SIZE) | |
548 | shift += XA_CHUNK_SHIFT; | |
549 | return shift + XA_CHUNK_SHIFT; | |
550 | } else if (xa_is_node(head)) { | |
551 | node = xa_to_node(head); | |
552 | shift = node->shift + XA_CHUNK_SHIFT; | |
553 | } | |
554 | xas->xa_node = NULL; | |
555 | ||
556 | while (max > max_index(head)) { | |
557 | xa_mark_t mark = 0; | |
558 | ||
559 | XA_NODE_BUG_ON(node, shift > BITS_PER_LONG); | |
560 | node = xas_alloc(xas, shift); | |
561 | if (!node) | |
562 | return -ENOMEM; | |
563 | ||
564 | node->count = 1; | |
565 | if (xa_is_value(head)) | |
566 | node->nr_values = 1; | |
567 | RCU_INIT_POINTER(node->slots[0], head); | |
568 | ||
569 | /* Propagate the aggregated mark info to the new child */ | |
570 | for (;;) { | |
571 | if (xa_track_free(xa) && mark == XA_FREE_MARK) { |
572 | node_mark_all(node, XA_FREE_MARK); | |
573 | if (!xa_marked(xa, XA_FREE_MARK)) { | |
574 | node_clear_mark(node, 0, XA_FREE_MARK); | |
575 | xa_mark_set(xa, XA_FREE_MARK); | |
576 | } | |
577 | } else if (xa_marked(xa, mark)) { | |
578 | node_set_mark(node, 0, mark); |
579 | } |
580 | if (mark == XA_MARK_MAX) |
581 | break; | |
582 | mark_inc(mark); | |
583 | } | |
584 | ||
585 | /* | |
586 | * Now that the new node is fully initialised, we can add | |
587 | * it to the tree | |
588 | */ | |
589 | if (xa_is_node(head)) { | |
590 | xa_to_node(head)->offset = 0; | |
591 | rcu_assign_pointer(xa_to_node(head)->parent, node); | |
592 | } | |
593 | head = xa_mk_node(node); | |
594 | rcu_assign_pointer(xa->xa_head, head); | |
595 | xas_update(xas, node); | |
596 | ||
597 | shift += XA_CHUNK_SHIFT; | |
598 | } | |
599 | ||
600 | xas->xa_node = node; | |
601 | return shift; | |
602 | } | |
603 | ||
604 | /* | |
605 | * xas_create() - Create a slot to store an entry in. | |
606 | * @xas: XArray operation state. | |
607 | * | |
608 | * Most users will not need to call this function directly, as it is called | |
609 | * by xas_store(). It is useful for doing conditional store operations | |
610 | * (see the xa_cmpxchg() implementation for an example). | |
611 | * | |
612 | * Return: If the slot already existed, returns the contents of this slot. | |
613 | * If the slot was newly created, returns %NULL. If it failed to create the |
614 | * slot, returns %NULL and indicates the error in @xas. | |
615 | */ |
616 | static void *xas_create(struct xa_state *xas) | |
617 | { | |
618 | struct xarray *xa = xas->xa; | |
619 | void *entry; | |
620 | void __rcu **slot; | |
621 | struct xa_node *node = xas->xa_node; | |
622 | int shift; | |
623 | unsigned int order = xas->xa_shift; | |
624 | ||
625 | if (xas_top(node)) { | |
626 | entry = xa_head_locked(xa); | |
627 | xas->xa_node = NULL; | |
628 | shift = xas_expand(xas, entry); | |
629 | if (shift < 0) | |
630 | return NULL; | |
631 | entry = xa_head_locked(xa); | |
632 | slot = &xa->xa_head; | |
633 | } else if (xas_error(xas)) { | |
634 | return NULL; | |
635 | } else if (node) { | |
636 | unsigned int offset = xas->xa_offset; | |
637 | ||
638 | shift = node->shift; | |
639 | entry = xa_entry_locked(xa, node, offset); | |
640 | slot = &node->slots[offset]; | |
641 | } else { | |
642 | shift = 0; | |
643 | entry = xa_head_locked(xa); | |
644 | slot = &xa->xa_head; | |
645 | } | |
646 | ||
647 | while (shift > order) { | |
648 | shift -= XA_CHUNK_SHIFT; | |
649 | if (!entry) { | |
650 | node = xas_alloc(xas, shift); | |
651 | if (!node) | |
652 | break; | |
653 | if (xa_track_free(xa)) |
654 | node_mark_all(node, XA_FREE_MARK); | |
655 | rcu_assign_pointer(*slot, xa_mk_node(node)); |
656 | } else if (xa_is_node(entry)) { | |
657 | node = xa_to_node(entry); | |
658 | } else { | |
659 | break; | |
660 | } | |
661 | entry = xas_descend(xas, node); | |
662 | slot = &node->slots[xas->xa_offset]; | |
663 | } | |
664 | ||
665 | return entry; | |
666 | } | |
667 | ||
668 | /** |
669 | * xas_create_range() - Ensure that stores to this range will succeed | |
670 | * @xas: XArray operation state. | |
671 | * | |
672 | * Creates all of the slots in the range covered by @xas. Sets @xas to | |
673 | * create single-index entries and positions it at the beginning of the | |
674 | * range. This is for the benefit of users which have not yet been | |
675 | * converted to use multi-index entries. | |
676 | */ | |
677 | void xas_create_range(struct xa_state *xas) | |
678 | { | |
679 | unsigned long index = xas->xa_index; | |
680 | unsigned char shift = xas->xa_shift; | |
681 | unsigned char sibs = xas->xa_sibs; | |
682 | ||
683 | xas->xa_index |= ((sibs + 1) << shift) - 1; | |
684 | if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift) | |
685 | xas->xa_offset |= sibs; | |
686 | xas->xa_shift = 0; | |
687 | xas->xa_sibs = 0; | |
688 | ||
689 | for (;;) { | |
690 | xas_create(xas); | |
691 | if (xas_error(xas)) | |
692 | goto restore; | |
693 | if (xas->xa_index <= (index | XA_CHUNK_MASK)) | |
694 | goto success; | |
695 | xas->xa_index -= XA_CHUNK_SIZE; | |
696 | ||
697 | for (;;) { | |
698 | struct xa_node *node = xas->xa_node; | |
699 | xas->xa_node = xa_parent_locked(xas->xa, node); | |
700 | xas->xa_offset = node->offset - 1; | |
701 | if (node->offset != 0) | |
702 | break; | |
703 | } | |
704 | } | |
705 | ||
706 | restore: | |
707 | xas->xa_shift = shift; | |
708 | xas->xa_sibs = sibs; | |
709 | xas->xa_index = index; | |
710 | return; | |
711 | success: | |
712 | xas->xa_index = index; | |
713 | if (xas->xa_node) | |
714 | xas_set_offset(xas); | |
715 | } | |
716 | EXPORT_SYMBOL_GPL(xas_create_range); | |
717 | ||
718 | static void update_node(struct xa_state *xas, struct xa_node *node, |
719 | int count, int values) | |
720 | { | |
721 | if (!node || (!count && !values)) | |
722 | return; | |
723 | ||
724 | node->count += count; | |
725 | node->nr_values += values; | |
726 | XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); | |
727 | XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE); | |
728 | xas_update(xas, node); | |
729 | if (count < 0) | |
730 | xas_delete_node(xas); | |
731 | } | |
732 | ||
733 | /** | |
734 | * xas_store() - Store this entry in the XArray. | |
735 | * @xas: XArray operation state. | |
736 | * @entry: New entry. | |
737 | * | |
738 | * If @xas is operating on a multi-index entry, the entry returned by this | |
739 | * function is essentially meaningless (it may be an internal entry or it | |
740 | * may be %NULL, even if there are non-NULL entries at some of the indices | |
741 | * covered by the range). This is not a problem for any current users, | |
742 | * and can be changed if needed. | |
743 | * | |
744 | * Return: The old entry at this index. | |
745 | */ | |
746 | void *xas_store(struct xa_state *xas, void *entry) | |
747 | { | |
748 | struct xa_node *node; | |
749 | void __rcu **slot = &xas->xa->xa_head; | |
750 | unsigned int offset, max; | |
751 | int count = 0; | |
752 | int values = 0; | |
753 | void *first, *next; | |
754 | bool value = xa_is_value(entry); | |
755 | ||
756 | if (entry) | |
757 | first = xas_create(xas); | |
758 | else | |
759 | first = xas_load(xas); | |
760 | ||
761 | if (xas_invalid(xas)) | |
762 | return first; | |
763 | node = xas->xa_node; | |
764 | if (node && (xas->xa_shift < node->shift)) | |
765 | xas->xa_sibs = 0; | |
766 | if ((first == entry) && !xas->xa_sibs) | |
767 | return first; | |
768 | ||
769 | next = first; | |
770 | offset = xas->xa_offset; | |
771 | max = xas->xa_offset + xas->xa_sibs; | |
772 | if (node) { | |
773 | slot = &node->slots[offset]; | |
774 | if (xas->xa_sibs) | |
775 | xas_squash_marks(xas); | |
776 | } | |
777 | if (!entry) | |
778 | xas_init_marks(xas); | |
779 | ||
780 | for (;;) { | |
781 | /* | |
782 | * Must clear the marks before setting the entry to NULL, | |
783 | * otherwise xas_for_each_marked may find a NULL entry and | |
784 | * stop early. rcu_assign_pointer contains a release barrier | |
785 | * so the mark clearing will appear to happen before the | |
786 | * entry is set to NULL. | |
787 | */ | |
788 | rcu_assign_pointer(*slot, entry); | |
789 | if (xa_is_node(next)) | |
790 | xas_free_nodes(xas, xa_to_node(next)); | |
791 | if (!node) | |
792 | break; | |
793 | count += !next - !entry; | |
794 | values += !xa_is_value(first) - !value; | |
795 | if (entry) { | |
796 | if (offset == max) | |
797 | break; | |
798 | if (!xa_is_sibling(entry)) | |
799 | entry = xa_mk_sibling(xas->xa_offset); | |
800 | } else { | |
801 | if (offset == XA_CHUNK_MASK) | |
802 | break; | |
803 | } | |
804 | next = xa_entry_locked(xas->xa, node, ++offset); | |
805 | if (!xa_is_sibling(next)) { | |
806 | if (!entry && (offset > max)) | |
807 | break; | |
808 | first = next; | |
809 | } | |
810 | slot++; | |
811 | } | |
812 | ||
813 | update_node(xas, node, count, values); | |
814 | return first; | |
815 | } | |
816 | EXPORT_SYMBOL_GPL(xas_store); | |
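/*
 * Illustrative sketch (not part of this file): erasing an entry with the
 * advanced API.  Storing NULL never needs to allocate memory, so no
 * xas_nomem() loop is required; xas_store() also re-initialises the marks
 * for the erased index, as noted in the code above.
 *
 *	XA_STATE(xas, my_xa, index);
 *	void *old;
 *
 *	xas_lock(&xas);
 *	old = xas_store(&xas, NULL);
 *	xas_unlock(&xas);
 *
 * "my_xa" and "index" are assumptions supplied for the example.
 */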
817 | ||
818 | /** |
819 | * xas_get_mark() - Returns the state of this mark. | |
820 | * @xas: XArray operation state. | |
821 | * @mark: Mark number. | |
822 | * | |
823 | * Return: true if the mark is set, false if the mark is clear or @xas | |
824 | * is in an error state. | |
825 | */ | |
826 | bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark) | |
827 | { | |
828 | if (xas_invalid(xas)) | |
829 | return false; | |
830 | if (!xas->xa_node) | |
831 | return xa_marked(xas->xa, mark); | |
832 | return node_get_mark(xas->xa_node, xas->xa_offset, mark); | |
833 | } | |
834 | EXPORT_SYMBOL_GPL(xas_get_mark); | |
835 | ||
836 | /** | |
837 | * xas_set_mark() - Sets the mark on this entry and its parents. | |
838 | * @xas: XArray operation state. | |
839 | * @mark: Mark number. | |
840 | * | |
841 | * Sets the specified mark on this entry, and walks up the tree setting it | |
842 | * on all the ancestor entries. Does nothing if @xas has not been walked to | |
843 | * an entry, or is in an error state. | |
844 | */ | |
845 | void xas_set_mark(const struct xa_state *xas, xa_mark_t mark) | |
846 | { | |
847 | struct xa_node *node = xas->xa_node; | |
848 | unsigned int offset = xas->xa_offset; | |
849 | ||
850 | if (xas_invalid(xas)) | |
851 | return; | |
852 | ||
853 | while (node) { | |
854 | if (node_set_mark(node, offset, mark)) | |
855 | return; | |
856 | offset = node->offset; | |
857 | node = xa_parent_locked(xas->xa, node); | |
858 | } | |
859 | ||
860 | if (!xa_marked(xas->xa, mark)) | |
861 | xa_mark_set(xas->xa, mark); | |
862 | } | |
863 | EXPORT_SYMBOL_GPL(xas_set_mark); | |
864 | ||
865 | /** | |
866 | * xas_clear_mark() - Clears the mark on this entry and its parents. | |
867 | * @xas: XArray operation state. | |
868 | * @mark: Mark number. | |
869 | * | |
870 | * Clears the specified mark on this entry, and walks back to the head | |
871 | * attempting to clear it on all the ancestor entries. Does nothing if | |
872 | * @xas has not been walked to an entry, or is in an error state. | |
873 | */ | |
874 | void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark) | |
875 | { | |
876 | struct xa_node *node = xas->xa_node; | |
877 | unsigned int offset = xas->xa_offset; | |
878 | ||
879 | if (xas_invalid(xas)) | |
880 | return; | |
881 | ||
882 | while (node) { | |
883 | if (!node_clear_mark(node, offset, mark)) | |
884 | return; | |
885 | if (node_any_mark(node, mark)) | |
886 | return; | |
887 | ||
888 | offset = node->offset; | |
889 | node = xa_parent_locked(xas->xa, node); | |
890 | } | |
891 | ||
892 | if (xa_marked(xas->xa, mark)) | |
893 | xa_mark_clear(xas->xa, mark); | |
894 | } | |
895 | EXPORT_SYMBOL_GPL(xas_clear_mark); | |
896 | ||
897 | /** |
898 | * xas_init_marks() - Initialise all marks for the entry | |
899 | * @xas: Array operations state. | |
900 | * | |
901 | * Initialise all marks for the entry specified by @xas. If we're tracking | |
902 | * free entries with a mark, we need to set it on all entries. All other | |
903 | * marks are cleared. | |
904 | * | |
905 | * This implementation is not as efficient as it could be; we may walk | |
906 | * up the tree multiple times. | |
907 | */ | |
908 | void xas_init_marks(const struct xa_state *xas) | |
909 | { | |
910 | xa_mark_t mark = 0; | |
911 | ||
912 | for (;;) { | |
913 | if (xa_track_free(xas->xa) && mark == XA_FREE_MARK) |
914 | xas_set_mark(xas, mark); | |
915 | else | |
916 | xas_clear_mark(xas, mark); | |
917 | if (mark == XA_MARK_MAX) |
918 | break; | |
919 | mark_inc(mark); | |
920 | } | |
921 | } | |
922 | EXPORT_SYMBOL_GPL(xas_init_marks); | |
923 | ||
924 | /** |
925 | * xas_pause() - Pause a walk to drop a lock. | |
926 | * @xas: XArray operation state. | |
927 | * | |
928 | * Some users need to pause a walk and drop the lock they're holding in | |
929 | * order to yield to a higher priority thread or carry out an operation | |
930 | * on an entry. Those users should call this function before they drop | |
931 | * the lock. It resets the @xas to be suitable for the next iteration | |
932 | * of the loop after the user has reacquired the lock. If most entries | |
933 | * found during a walk require you to call xas_pause(), the xa_for_each() | |
934 | * iterator may be more appropriate. | |
935 | * | |
936 | * Note that xas_pause() only works for forward iteration. If a user needs | |
937 | * to pause a reverse iteration, we will need a xas_pause_rev(). | |
938 | */ | |
939 | void xas_pause(struct xa_state *xas) | |
940 | { | |
941 | struct xa_node *node = xas->xa_node; | |
942 | ||
943 | if (xas_invalid(xas)) | |
944 | return; | |
945 | ||
946 | if (node) { | |
947 | unsigned int offset = xas->xa_offset; | |
948 | while (++offset < XA_CHUNK_SIZE) { | |
949 | if (!xa_is_sibling(xa_entry(xas->xa, node, offset))) | |
950 | break; | |
951 | } | |
952 | xas->xa_index += (offset - xas->xa_offset) << node->shift; | |
953 | } else { | |
954 | xas->xa_index++; | |
955 | } | |
956 | xas->xa_node = XAS_RESTART; | |
957 | } | |
958 | EXPORT_SYMBOL_GPL(xas_pause); | |
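/*
 * Illustrative sketch (not part of this file): yielding the lock in the
 * middle of a long iteration, as described above.  process_entry() is a
 * hypothetical caller-supplied function.
 *
 *	XA_STATE(xas, my_xa, 0);
 *	void *entry;
 *
 *	xas_lock(&xas);
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		process_entry(entry);
 *		if (need_resched()) {
 *			xas_pause(&xas);
 *			xas_unlock(&xas);
 *			cond_resched();
 *			xas_lock(&xas);
 *		}
 *	}
 *	xas_unlock(&xas);
 */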
959 | ||
960 | /* |
961 | * __xas_prev() - Find the previous entry in the XArray. | |
962 | * @xas: XArray operation state. | |
963 | * | |
964 | * Helper function for xas_prev() which handles all the complex cases | |
965 | * out of line. | |
966 | */ | |
967 | void *__xas_prev(struct xa_state *xas) | |
968 | { | |
969 | void *entry; | |
970 | ||
971 | if (!xas_frozen(xas->xa_node)) | |
972 | xas->xa_index--; | |
973 | if (xas_not_node(xas->xa_node)) | |
974 | return xas_load(xas); | |
975 | ||
976 | if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node)) | |
977 | xas->xa_offset--; | |
978 | ||
979 | while (xas->xa_offset == 255) { | |
980 | xas->xa_offset = xas->xa_node->offset - 1; | |
981 | xas->xa_node = xa_parent(xas->xa, xas->xa_node); | |
982 | if (!xas->xa_node) | |
983 | return set_bounds(xas); | |
984 | } | |
985 | ||
986 | for (;;) { | |
987 | entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); | |
988 | if (!xa_is_node(entry)) | |
989 | return entry; | |
990 | ||
991 | xas->xa_node = xa_to_node(entry); | |
992 | xas_set_offset(xas); | |
993 | } | |
994 | } | |
995 | EXPORT_SYMBOL_GPL(__xas_prev); | |
996 | ||
997 | /* | |
998 | * __xas_next() - Find the next entry in the XArray. | |
999 | * @xas: XArray operation state. | |
1000 | * | |
1001 | * Helper function for xas_next() which handles all the complex cases | |
1002 | * out of line. | |
1003 | */ | |
1004 | void *__xas_next(struct xa_state *xas) | |
1005 | { | |
1006 | void *entry; | |
1007 | ||
1008 | if (!xas_frozen(xas->xa_node)) | |
1009 | xas->xa_index++; | |
1010 | if (xas_not_node(xas->xa_node)) | |
1011 | return xas_load(xas); | |
1012 | ||
1013 | if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node)) | |
1014 | xas->xa_offset++; | |
1015 | ||
1016 | while (xas->xa_offset == XA_CHUNK_SIZE) { | |
1017 | xas->xa_offset = xas->xa_node->offset + 1; | |
1018 | xas->xa_node = xa_parent(xas->xa, xas->xa_node); | |
1019 | if (!xas->xa_node) | |
1020 | return set_bounds(xas); | |
1021 | } | |
1022 | ||
1023 | for (;;) { | |
1024 | entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); | |
1025 | if (!xa_is_node(entry)) | |
1026 | return entry; | |
1027 | ||
1028 | xas->xa_node = xa_to_node(entry); | |
1029 | xas_set_offset(xas); | |
1030 | } | |
1031 | } | |
1032 | EXPORT_SYMBOL_GPL(__xas_next); | |
1033 | ||
1034 | /** |
1035 | * xas_find() - Find the next present entry in the XArray. | |
1036 | * @xas: XArray operation state. | |
1037 | * @max: Highest index to return. | |
1038 | * | |
1039 | * If the @xas has not yet been walked to an entry, return the entry | |
1040 | * which has an index >= xas.xa_index. If it has been walked, the entry | |
1041 | * currently being pointed at has been processed, and so we move to the | |
1042 | * next entry. | |
1043 | * | |
1044 | * If no entry is found and the array is smaller than @max, the iterator | |
1045 | * is set to the smallest index not yet in the array. This allows @xas | |
1046 | * to be immediately passed to xas_store(). | |
1047 | * | |
1048 | * Return: The entry, if found, otherwise %NULL. | |
1049 | */ | |
1050 | void *xas_find(struct xa_state *xas, unsigned long max) | |
1051 | { | |
1052 | void *entry; | |
1053 | ||
1054 | if (xas_error(xas)) | |
1055 | return NULL; | |
1056 | ||
1057 | if (!xas->xa_node) { | |
1058 | xas->xa_index = 1; | |
1059 | return set_bounds(xas); | |
1060 | } else if (xas_top(xas->xa_node)) { | |
1061 | entry = xas_load(xas); | |
1062 | if (entry || xas_not_node(xas->xa_node)) | |
1063 | return entry; | |
1064 | } else if (!xas->xa_node->shift && | |
1065 | xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) { | |
1066 | xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1; | |
1067 | } | |
1068 | ||
1069 | xas_advance(xas); | |
1070 | ||
1071 | while (xas->xa_node && (xas->xa_index <= max)) { | |
1072 | if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) { | |
1073 | xas->xa_offset = xas->xa_node->offset + 1; | |
1074 | xas->xa_node = xa_parent(xas->xa, xas->xa_node); | |
1075 | continue; | |
1076 | } | |
1077 | ||
1078 | entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); | |
1079 | if (xa_is_node(entry)) { | |
1080 | xas->xa_node = xa_to_node(entry); | |
1081 | xas->xa_offset = 0; | |
1082 | continue; | |
1083 | } | |
1084 | if (entry && !xa_is_sibling(entry)) | |
1085 | return entry; | |
1086 | ||
1087 | xas_advance(xas); | |
1088 | } | |
1089 | ||
1090 | if (!xas->xa_node) | |
1091 | xas->xa_node = XAS_BOUNDS; | |
1092 | return NULL; | |
1093 | } | |
1094 | EXPORT_SYMBOL_GPL(xas_find); | |
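/*
 * xas_find() is normally used through the xas_for_each() iterator.  An
 * illustrative sketch (not part of this file) of walking all present
 * entries up to "max" under the RCU read lock:
 *
 *	rcu_read_lock();
 *	xas_for_each(&xas, entry, max) {
 *		if (xas_retry(&xas, entry))
 *			continue;
 *		(entry is a present, non-retry entry at this point)
 *	}
 *	rcu_read_unlock();
 */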
1095 | ||
1096 | /** | |
1097 | * xas_find_marked() - Find the next marked entry in the XArray. | |
1098 | * @xas: XArray operation state. | |
1099 | * @max: Highest index to return. | |
1100 | * @mark: Mark number to search for. | |
1101 | * | |
1102 | * If the @xas has not yet been walked to an entry, return the marked entry | |
1103 | * which has an index >= xas.xa_index. If it has been walked, the entry | |
1104 | * currently being pointed at has been processed, and so we return the | |
1105 | * first marked entry with an index > xas.xa_index. | |
1106 | * | |
1107 | * If no marked entry is found and the array is smaller than @max, @xas is | |
1108 | * set to the bounds state and xas->xa_index is set to the smallest index | |
1109 | * not yet in the array. This allows @xas to be immediately passed to | |
1110 | * xas_store(). | |
1111 | * | |
1112 | * If no entry is found before @max is reached, @xas is set to the restart | |
1113 | * state. | |
1114 | * | |
1115 | * Return: The entry, if found, otherwise %NULL. | |
1116 | */ | |
1117 | void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) | |
1118 | { | |
1119 | bool advance = true; | |
1120 | unsigned int offset; | |
1121 | void *entry; | |
1122 | ||
1123 | if (xas_error(xas)) | |
1124 | return NULL; | |
1125 | ||
1126 | if (!xas->xa_node) { | |
1127 | xas->xa_index = 1; | |
1128 | goto out; | |
1129 | } else if (xas_top(xas->xa_node)) { | |
1130 | advance = false; | |
1131 | entry = xa_head(xas->xa); | |
1132 | xas->xa_node = NULL; | |
1133 | if (xas->xa_index > max_index(entry)) | |
1134 | goto bounds; | |
1135 | if (!xa_is_node(entry)) { | |
1136 | if (xa_marked(xas->xa, mark)) | |
1137 | return entry; | |
1138 | xas->xa_index = 1; | |
1139 | goto out; | |
1140 | } | |
1141 | xas->xa_node = xa_to_node(entry); | |
1142 | xas->xa_offset = xas->xa_index >> xas->xa_node->shift; | |
1143 | } | |
1144 | ||
1145 | while (xas->xa_index <= max) { | |
1146 | if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) { | |
1147 | xas->xa_offset = xas->xa_node->offset + 1; | |
1148 | xas->xa_node = xa_parent(xas->xa, xas->xa_node); | |
1149 | if (!xas->xa_node) | |
1150 | break; | |
1151 | advance = false; | |
1152 | continue; | |
1153 | } | |
1154 | ||
1155 | if (!advance) { | |
1156 | entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); | |
1157 | if (xa_is_sibling(entry)) { | |
1158 | xas->xa_offset = xa_to_sibling(entry); | |
1159 | xas_move_index(xas, xas->xa_offset); | |
1160 | } | |
1161 | } | |
1162 | ||
1163 | offset = xas_find_chunk(xas, advance, mark); | |
1164 | if (offset > xas->xa_offset) { | |
1165 | advance = false; | |
1166 | xas_move_index(xas, offset); | |
1167 | /* Mind the wrap */ | |
1168 | if ((xas->xa_index - 1) >= max) | |
1169 | goto max; | |
1170 | xas->xa_offset = offset; | |
1171 | if (offset == XA_CHUNK_SIZE) | |
1172 | continue; | |
1173 | } | |
1174 | ||
1175 | entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); | |
1176 | if (!xa_is_node(entry)) | |
1177 | return entry; | |
1178 | xas->xa_node = xa_to_node(entry); | |
1179 | xas_set_offset(xas); | |
1180 | } | |
1181 | ||
1182 | out: | |
1183 | if (!max) | |
1184 | goto max; | |
1185 | bounds: | |
1186 | xas->xa_node = XAS_BOUNDS; | |
1187 | return NULL; | |
1188 | max: | |
1189 | xas->xa_node = XAS_RESTART; | |
1190 | return NULL; | |
1191 | } | |
1192 | EXPORT_SYMBOL_GPL(xas_find_marked); | |
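/*
 * Illustrative sketch (not part of this file): xas_find_marked() is the
 * engine behind xas_for_each_marked(), which visits only entries that have
 * a particular mark set:
 *
 *	xas_for_each_marked(&xas, entry, max, XA_MARK_0) {
 *		...
 *	}
 *
 * It is also used below by __xa_alloc() with XA_FREE_MARK to locate a
 * free index.
 */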
1193 | ||
1194 | /** |
1195 | * xas_find_conflict() - Find the next present entry in a range. | |
1196 | * @xas: XArray operation state. | |
1197 | * | |
1198 | * The @xas describes both a range and a position within that range. | |
1199 | * | |
1200 | * Context: Any context. Expects xa_lock to be held. | |
1201 | * Return: The next entry in the range covered by @xas or %NULL. | |
1202 | */ | |
1203 | void *xas_find_conflict(struct xa_state *xas) | |
1204 | { | |
1205 | void *curr; | |
1206 | ||
1207 | if (xas_error(xas)) | |
1208 | return NULL; | |
1209 | ||
1210 | if (!xas->xa_node) | |
1211 | return NULL; | |
1212 | ||
1213 | if (xas_top(xas->xa_node)) { | |
1214 | curr = xas_start(xas); | |
1215 | if (!curr) | |
1216 | return NULL; | |
1217 | while (xa_is_node(curr)) { | |
1218 | struct xa_node *node = xa_to_node(curr); | |
1219 | curr = xas_descend(xas, node); | |
1220 | } | |
1221 | if (curr) | |
1222 | return curr; | |
1223 | } | |
1224 | ||
1225 | if (xas->xa_node->shift > xas->xa_shift) | |
1226 | return NULL; | |
1227 | ||
1228 | for (;;) { | |
1229 | if (xas->xa_node->shift == xas->xa_shift) { | |
1230 | if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs) | |
1231 | break; | |
1232 | } else if (xas->xa_offset == XA_CHUNK_MASK) { | |
1233 | xas->xa_offset = xas->xa_node->offset; | |
1234 | xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node); | |
1235 | if (!xas->xa_node) | |
1236 | break; | |
1237 | continue; | |
1238 | } | |
1239 | curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset); | |
1240 | if (xa_is_sibling(curr)) | |
1241 | continue; | |
1242 | while (xa_is_node(curr)) { | |
1243 | xas->xa_node = xa_to_node(curr); | |
1244 | xas->xa_offset = 0; | |
1245 | curr = xa_entry_locked(xas->xa, xas->xa_node, 0); | |
1246 | } | |
1247 | if (curr) | |
1248 | return curr; | |
1249 | } | |
1250 | xas->xa_offset -= xas->xa_sibs; | |
1251 | return NULL; | |
1252 | } | |
1253 | EXPORT_SYMBOL_GPL(xas_find_conflict); | |
1254 | ||
1255 | /** |
1256 | * xa_init_flags() - Initialise an empty XArray with flags. | |
1257 | * @xa: XArray. | |
1258 | * @flags: XA_FLAG values. | |
1259 | * | |
1260 | * If you need to initialise an XArray with special flags (eg you need | |
1261 | * to take the lock from interrupt context), use this function instead | |
1262 | * of xa_init(). | |
1263 | * | |
1264 | * Context: Any context. | |
1265 | */ | |
1266 | void xa_init_flags(struct xarray *xa, gfp_t flags) | |
1267 | { | |
1268 | unsigned int lock_type; |
1269 | static struct lock_class_key xa_lock_irq; | |
1270 | static struct lock_class_key xa_lock_bh; | |
1271 | ||
1272 | spin_lock_init(&xa->xa_lock); |
1273 | xa->xa_flags = flags; | |
1274 | xa->xa_head = NULL; | |
1275 | |
1276 | lock_type = xa_lock_type(xa); | |
1277 | if (lock_type == XA_LOCK_IRQ) | |
1278 | lockdep_set_class(&xa->xa_lock, &xa_lock_irq); | |
1279 | else if (lock_type == XA_LOCK_BH) | |
1280 | lockdep_set_class(&xa->xa_lock, &xa_lock_bh); | |
1281 | } |
1282 | EXPORT_SYMBOL(xa_init_flags); | |
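/*
 * Illustrative sketch (not part of this file): initialising an XArray whose
 * lock will be taken from interrupt context, as the comment above suggests.
 * "pending" is a name invented for the example.
 *
 *	static struct xarray pending;
 *
 *	xa_init_flags(&pending, XA_FLAGS_LOCK_IRQ);
 */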
1283 | |
1284 | /** | |
1285 | * xa_load() - Load an entry from an XArray. | |
1286 | * @xa: XArray. | |
1287 | * @index: index into array. | |
1288 | * | |
1289 | * Context: Any context. Takes and releases the RCU lock. | |
1290 | * Return: The entry at @index in @xa. | |
1291 | */ | |
1292 | void *xa_load(struct xarray *xa, unsigned long index) | |
1293 | { | |
1294 | XA_STATE(xas, xa, index); | |
1295 | void *entry; | |
1296 | ||
1297 | rcu_read_lock(); | |
1298 | do { | |
1299 | entry = xas_load(&xas); | |
1300 | if (xa_is_zero(entry)) |
1301 | entry = NULL; | |
1302 | } while (xas_retry(&xas, entry)); |
1303 | rcu_read_unlock(); | |
1304 | ||
1305 | return entry; | |
1306 | } | |
1307 | EXPORT_SYMBOL(xa_load); | |
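/*
 * Illustrative sketch (not part of this file): the normal API needs no
 * locking from the caller; xa_load() takes the RCU read lock itself.
 *
 *	struct foo *p = xa_load(&my_xa, index);
 *	if (!p)
 *		(no entry is stored at this index)
 *
 * "struct foo", "my_xa" and "index" are assumptions for the example.
 */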
1308 | ||
1309 | static void *xas_result(struct xa_state *xas, void *curr) |
1310 | { | |
1311 | if (xa_is_zero(curr)) |
1312 | return NULL; | |
1313 | XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr)); |
1314 | if (xas_error(xas)) | |
1315 | curr = xas->xa_node; | |
1316 | return curr; | |
1317 | } | |
1318 | ||
1319 | /** | |
1320 | * __xa_erase() - Erase this entry from the XArray while locked. | |
1321 | * @xa: XArray. | |
1322 | * @index: Index into array. | |
1323 | * | |
1324 | * If the entry at this index is a multi-index entry then all indices will | |
1325 | * be erased, and the entry will no longer be a multi-index entry. | |
1326 | * This function expects the xa_lock to be held on entry. | |
1327 | * | |
1328 | * Context: Any context. Expects xa_lock to be held on entry. The lock |
1329 | * is not dropped; erasing an entry never requires memory allocation. |
1330 | * Return: The old entry at this index. | |
1331 | */ | |
1332 | void *__xa_erase(struct xarray *xa, unsigned long index) | |
1333 | { | |
1334 | XA_STATE(xas, xa, index); | |
1335 | return xas_result(&xas, xas_store(&xas, NULL)); | |
1336 | } | |
1337 | EXPORT_SYMBOL(__xa_erase); |
1338 | ||
1339 | /** |
1340 | * xa_erase() - Erase this entry from the XArray. | |
1341 | * @xa: XArray. | |
1342 | * @index: Index of entry. | |
1343 | * | |
1344 | * This function is the equivalent of calling xa_store() with %NULL as | |
1345 | * the third argument. The XArray does not need to allocate memory, so | |
1346 | * the user does not need to provide GFP flags. | |
1347 | * | |
1348 | * Context: Any context. Takes and releases the xa_lock. | |
1349 | * Return: The entry which used to be at this index. | |
1350 | */ | |
1351 | void *xa_erase(struct xarray *xa, unsigned long index) | |
1352 | { | |
1353 | void *entry; | |
1354 | ||
1355 | xa_lock(xa); | |
1356 | entry = __xa_erase(xa, index); | |
1357 | xa_unlock(xa); | |
1358 | ||
1359 | return entry; | |
1360 | } | |
1361 | EXPORT_SYMBOL(xa_erase); | |
1362 | ||
1363 | /** |
1364 | * __xa_store() - Store this entry in the XArray. |
1365 | * @xa: XArray. |
1366 | * @index: Index into array. | |
1367 | * @entry: New entry. | |
1368 | * @gfp: Memory allocation flags. | |
1369 | * | |
1370 | * You must already be holding the xa_lock when calling this function. |
1371 | * It will drop the lock if needed to allocate memory, and then reacquire | |
1372 | * it afterwards. | |
1373 | * |
1374 | * Context: Any context. Expects xa_lock to be held on entry. May |
1375 | * release and reacquire xa_lock if @gfp flags permit. | |
1376 | * Return: The old entry at this index or xa_err() if an error happened. | |
1377 | */ |
1378 | void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) |
1379 | { |
1380 | XA_STATE(xas, xa, index); | |
1381 | void *curr; | |
1382 | ||
1383 | if (WARN_ON_ONCE(xa_is_internal(entry))) | |
1384 | return XA_ERROR(-EINVAL); | |
1385 | if (xa_track_free(xa) && !entry) |
1386 | entry = XA_ZERO_ENTRY; | |
1387 | |
1388 | do { | |
1389 | curr = xas_store(&xas, entry); |
1390 | if (xa_track_free(xa)) |
1391 | xas_clear_mark(&xas, XA_FREE_MARK); |
1392 | } while (__xas_nomem(&xas, gfp)); |
1393 | |
1394 | return xas_result(&xas, curr); | |
1395 | } | |
1396 | EXPORT_SYMBOL(__xa_store); |
1397 | |
1398 | /** | |
1399 | * xa_store() - Store this entry in the XArray. |
1400 | * @xa: XArray. |
1401 | * @index: Index into array. | |
1402 | * @entry: New entry. | |
1403 | * @gfp: Memory allocation flags. | |
1404 | * | |
1405 | * After this function returns, loads from this index will return @entry. |
1406 | * Storing into an existing multislot entry updates the entry of every index. | |
1407 | * The marks associated with @index are unaffected unless @entry is %NULL. | |
1408 | * |
1409 | * Context: Any context. Takes and releases the xa_lock. |
1410 | * May sleep if the @gfp flags permit. | |
1411 | * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry | |
1412 | * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation | |
1413 | * failed. | |
1414 | */ |
1415 | void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) |
1416 | { |
1417 | void *curr; |
1418 | ||
1419 | xa_lock(xa); |
1420 | curr = __xa_store(xa, index, entry, gfp); | |
1421 | xa_unlock(xa); | |
1422 | ||
1423 | return curr; |
1424 | } |
1425 | EXPORT_SYMBOL(xa_store); |
1426 | ||
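/*
 * Illustrative sketch (not part of this file): storing with the normal API
 * and checking for an allocation failure encoded in the return value.
 * xa_is_err() and xa_err() unpack the error encoded by xas_result() above.
 *
 *	void *old = xa_store(&my_xa, index, item, GFP_KERNEL);
 *
 *	if (xa_is_err(old))
 *		return xa_err(old);
 *
 * "my_xa", "index" and "item" are assumptions supplied for the example.
 */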
1427 | /** |
1428 | * __xa_cmpxchg() - Store this entry in the XArray. | |
1429 | * @xa: XArray. | |
1430 | * @index: Index into array. | |
1431 | * @old: Old value to test against. | |
1432 | * @entry: New entry. | |
1433 | * @gfp: Memory allocation flags. | |
1434 | * | |
1435 | * You must already be holding the xa_lock when calling this function. | |
1436 | * It will drop the lock if needed to allocate memory, and then reacquire | |
1437 | * it afterwards. | |
1438 | * | |
1439 | * Context: Any context. Expects xa_lock to be held on entry. May | |
1440 | * release and reacquire xa_lock if @gfp flags permit. | |
1441 | * Return: The old entry at this index or xa_err() if an error happened. | |
1442 | */ | |
1443 | void *__xa_cmpxchg(struct xarray *xa, unsigned long index, | |
1444 | void *old, void *entry, gfp_t gfp) | |
1445 | { | |
1446 | XA_STATE(xas, xa, index); | |
1447 | void *curr; | |
1448 | ||
1449 | if (WARN_ON_ONCE(xa_is_internal(entry))) | |
1450 | return XA_ERROR(-EINVAL); | |
1451 | if (xa_track_free(xa) && !entry) |
1452 | entry = XA_ZERO_ENTRY; | |
1453 | |
1454 | do { | |
1455 | curr = xas_load(&xas); | |
1456 | if (curr == XA_ZERO_ENTRY) |
1457 | curr = NULL; | |
1458 | if (curr == old) { |
1459 | xas_store(&xas, entry); |
1460 | if (xa_track_free(xa)) |
1461 | xas_clear_mark(&xas, XA_FREE_MARK); |
1462 | } | |
1463 | } while (__xas_nomem(&xas, gfp)); |
1464 | ||
1465 | return xas_result(&xas, curr); | |
1466 | } | |
1467 | EXPORT_SYMBOL(__xa_cmpxchg); | |
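/*
 * Illustrative sketch (not part of this file): insert-if-empty with
 * __xa_cmpxchg(), using NULL as the expected old value.  The caller already
 * holds the xa_lock, as required above.
 *
 *	curr = __xa_cmpxchg(&my_xa, index, NULL, item, GFP_KERNEL);
 *	if (xa_is_err(curr))
 *		err = xa_err(curr);
 *	else if (curr)
 *		(somebody else stored an entry here first)
 *
 * "my_xa", "index" and "item" are assumptions supplied for the example.
 */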
1468 | ||
1469 | /** |
1470 | * __xa_reserve() - Reserve this index in the XArray. |
1471 | * @xa: XArray. |
1472 | * @index: Index into array. | |
1473 | * @gfp: Memory allocation flags. | |
1474 | * | |
1475 | * Ensures there is somewhere to store an entry at @index in the array. | |
1476 | * If there is already something stored at @index, this function does | |
1477 | * nothing. If there was nothing there, the entry is marked as reserved. | |
1478 | * Loading from a reserved entry returns a %NULL pointer. |
1479 | * |
1480 | * If you do not use the entry that you have reserved, call xa_release() | |
1481 | * or xa_erase() to free any unnecessary memory. | |
1482 | * | |
1483 | * Context: Any context. Expects the xa_lock to be held on entry. May |
1484 | * release the lock, sleep and reacquire the lock if the @gfp flags permit. | |
1485 | * Return: 0 if the reservation succeeded or -ENOMEM if it failed. |
1486 | */ | |
1487 | int __xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) |
1488 | { |
1489 | XA_STATE(xas, xa, index); | |
1490 | void *curr; |
1491 | ||
1492 | do { | |
1493 | curr = xas_load(&xas); |
1494 | if (!curr) { |
1495 | xas_store(&xas, XA_ZERO_ENTRY); |
1496 | if (xa_track_free(xa)) |
1497 | xas_clear_mark(&xas, XA_FREE_MARK); | |
1498 | } | |
1499 | } while (__xas_nomem(&xas, gfp)); |
1500 | |
1501 | return xas_error(&xas); | |
1502 | } | |
1503 | EXPORT_SYMBOL(__xa_reserve); |
1504 | ||
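/*
 * Illustrative sketch (not part of this file): reserving an index under the
 * lock and either filling it in or releasing it later, as the kerneldoc
 * above describes.
 *
 *	xa_lock(&my_xa);
 *	err = __xa_reserve(&my_xa, index, GFP_KERNEL);
 *	xa_unlock(&my_xa);
 *	...
 *	xa_store(&my_xa, index, item, GFP_KERNEL);	(use the reservation)
 * or
 *	xa_release(&my_xa, index);			(give it back)
 *
 * "my_xa", "index" and "item" are assumptions supplied for the example.
 */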
1505 | #ifdef CONFIG_XARRAY_MULTI |
1506 | static void xas_set_range(struct xa_state *xas, unsigned long first, | |
1507 | unsigned long last) | |
1508 | { | |
1509 | unsigned int shift = 0; | |
1510 | unsigned long sibs = last - first; | |
1511 | unsigned int offset = XA_CHUNK_MASK; | |
1512 | ||
1513 | xas_set(xas, first); | |
1514 | ||
1515 | while ((first & XA_CHUNK_MASK) == 0) { | |
1516 | if (sibs < XA_CHUNK_MASK) | |
1517 | break; | |
1518 | if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK)) | |
1519 | break; | |
1520 | shift += XA_CHUNK_SHIFT; | |
1521 | if (offset == XA_CHUNK_MASK) | |
1522 | offset = sibs & XA_CHUNK_MASK; | |
1523 | sibs >>= XA_CHUNK_SHIFT; | |
1524 | first >>= XA_CHUNK_SHIFT; | |
1525 | } | |
1526 | ||
1527 | offset = first & XA_CHUNK_MASK; | |
1528 | if (offset + sibs > XA_CHUNK_MASK) | |
1529 | sibs = XA_CHUNK_MASK - offset; | |
1530 | if ((((first + sibs + 1) << shift) - 1) > last) | |
1531 | sibs -= 1; | |
1532 | ||
1533 | xas->xa_shift = shift; | |
1534 | xas->xa_sibs = sibs; | |
1535 | } | |
1536 | ||
1537 | /** | |
1538 | * xa_store_range() - Store this entry at a range of indices in the XArray. | |
1539 | * @xa: XArray. | |
1540 | * @first: First index to affect. | |
1541 | * @last: Last index to affect. | |
1542 | * @entry: New entry. | |
1543 | * @gfp: Memory allocation flags. | |
1544 | * | |
1545 | * After this function returns, loads from any index between @first and @last, | |
1546 | * inclusive, will return @entry. |
1547 | * Storing into an existing multislot entry updates the entry of every index. |
1548 | * The marks associated with these indices are unaffected unless @entry is %NULL. |
1549 | * | |
1550 | * Context: Process context. Takes and releases the xa_lock. May sleep | |
1551 | * if the @gfp flags permit. | |
1552 | * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in | |
1553 | * an XArray, or xa_err(-ENOMEM) if memory allocation failed. | |
1554 | */ | |
1555 | void *xa_store_range(struct xarray *xa, unsigned long first, | |
1556 | unsigned long last, void *entry, gfp_t gfp) | |
1557 | { | |
1558 | XA_STATE(xas, xa, 0); | |
1559 | ||
1560 | if (WARN_ON_ONCE(xa_is_internal(entry))) | |
1561 | return XA_ERROR(-EINVAL); | |
1562 | if (last < first) | |
1563 | return XA_ERROR(-EINVAL); | |
1564 | ||
1565 | do { | |
1566 | xas_lock(&xas); | |
1567 | if (entry) { | |
1568 | unsigned int order = BITS_PER_LONG; |
1569 | if (last + 1) | |
1570 | order = __ffs(last + 1); | |
1571 | xas_set_order(&xas, last, order); |
1572 | xas_create(&xas); | |
1573 | if (xas_error(&xas)) | |
1574 | goto unlock; | |
1575 | } | |
1576 | do { | |
1577 | xas_set_range(&xas, first, last); | |
1578 | xas_store(&xas, entry); | |
1579 | if (xas_error(&xas)) | |
1580 | goto unlock; | |
1581 | first += xas_size(&xas); | |
1582 | } while (first <= last); | |
1583 | unlock: | |
1584 | xas_unlock(&xas); | |
1585 | } while (xas_nomem(&xas, gfp)); | |
1586 | ||
1587 | return xas_result(&xas, NULL); | |
1588 | } | |
1589 | EXPORT_SYMBOL(xa_store_range); | |
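/*
 * Illustrative sketch (not part of this file): covering a block of 64
 * indices with a single entry.  Afterwards xa_load() on any index from
 * 64 to 127 returns "item".
 *
 *	xa_store_range(&my_xa, 64, 127, item, GFP_KERNEL);
 *
 * "my_xa" and "item" are assumptions supplied for the example.
 */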
1590 | #endif /* CONFIG_XARRAY_MULTI */ | |
1591 | ||
1592 | /** |
1593 | * __xa_alloc() - Find somewhere to store this entry in the XArray. | |
1594 | * @xa: XArray. | |
1595 | * @id: Pointer to ID. | |
1596 | * @max: Maximum ID to allocate (inclusive). | |
1597 | * @entry: New entry. | |
1598 | * @gfp: Memory allocation flags. | |
1599 | * | |
1600 | * Allocates an unused ID in the range specified by @id and @max. | |
1601 | * Updates the @id pointer with the index, then stores the entry at that | |
1602 | * index. A concurrent lookup will not see an uninitialised @id. | |
1603 | * | |
1604 | * Context: Any context. Expects xa_lock to be held on entry. May | |
1605 | * release and reacquire xa_lock if @gfp flags permit. | |
1606 | * Return: 0 on success, -ENOMEM if memory allocation fails, or -ENOSPC if | |
1607 | * there is no more space in the XArray. | |
1608 | */ | |
1609 | int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp) | |
1610 | { | |
1611 | XA_STATE(xas, xa, 0); | |
1612 | int err; | |
1613 | ||
1614 | if (WARN_ON_ONCE(xa_is_internal(entry))) | |
1615 | return -EINVAL; | |
1616 | if (WARN_ON_ONCE(!xa_track_free(xa))) | |
1617 | return -EINVAL; | |
1618 | ||
1619 | if (!entry) | |
1620 | entry = XA_ZERO_ENTRY; | |
1621 | ||
1622 | do { | |
1623 | xas.xa_index = *id; | |
1624 | xas_find_marked(&xas, max, XA_FREE_MARK); | |
1625 | if (xas.xa_node == XAS_RESTART) | |
1626 | xas_set_err(&xas, -ENOSPC); | |
1627 | xas_store(&xas, entry); | |
1628 | xas_clear_mark(&xas, XA_FREE_MARK); | |
1629 | } while (__xas_nomem(&xas, gfp)); | |
1630 | ||
1631 | err = xas_error(&xas); | |
1632 | if (!err) | |
1633 | *id = xas.xa_index; | |
1634 | return err; | |
1635 | } | |
1636 | EXPORT_SYMBOL(__xa_alloc); | |
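/*
 * Minimal usage sketch (editorial illustration; the function name is
 * hypothetical).  It assumes @xa was initialised with free tracking
 * enabled (e.g. DEFINE_XARRAY_ALLOC() or xa_init_flags() with an
 * allocating flag set); otherwise __xa_alloc() returns -EINVAL.
 */
static int __maybe_unused xa_alloc_example(struct xarray *xa, void *item)
{
	u32 id = 0;
	int err;

	xa_lock(xa);
	/* With GFP_KERNEL, the lock may be dropped and retaken to allocate. */
	err = __xa_alloc(xa, &id, UINT_MAX, item, GFP_KERNEL);
	xa_unlock(xa);
	if (err)
		return err;

	pr_debug("stored item at index %u\n", id);
	return 0;
}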
1637 | ||
9b89a035 MW |
1638 | /** |
1639 | * __xa_set_mark() - Set this mark on this entry while locked. | |
1640 | * @xa: XArray. | |
1641 | * @index: Index of entry. | |
1642 | * @mark: Mark number. | |
1643 | * | |
804dfaf0 | 1644 | * Attempting to set a mark on a %NULL entry does not succeed. |
9b89a035 MW |
1645 | * |
1646 | * Context: Any context. Expects xa_lock to be held on entry. | |
1647 | */ | |
1648 | void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) | |
1649 | { | |
1650 | XA_STATE(xas, xa, index); | |
1651 | void *entry = xas_load(&xas); | |
1652 | ||
1653 | if (entry) | |
1654 | xas_set_mark(&xas, mark); | |
1655 | } | |
9ee5a3b7 | 1656 | EXPORT_SYMBOL(__xa_set_mark); |
9b89a035 MW |
1657 | |
1658 | /** | |
1659 | * __xa_clear_mark() - Clear this mark on this entry while locked. | |
1660 | * @xa: XArray. | |
1661 | * @index: Index of entry. | |
1662 | * @mark: Mark number. | |
1663 | * | |
1664 | * Context: Any context. Expects xa_lock to be held on entry. | |
1665 | */ | |
1666 | void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) | |
1667 | { | |
1668 | XA_STATE(xas, xa, index); | |
1669 | void *entry = xas_load(&xas); | |
1670 | ||
1671 | if (entry) | |
1672 | xas_clear_mark(&xas, mark); | |
1673 | } | |
9ee5a3b7 | 1674 | EXPORT_SYMBOL(__xa_clear_mark); |
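/*
 * Minimal usage sketch (editorial illustration; the function name is
 * hypothetical).  The locked variants suit callers which already hold the
 * xa_lock, e.g. to mark an entry in the same critical section that
 * stores it.
 */
static void __maybe_unused xa_mark_locked_example(struct xarray *xa,
		unsigned long index, void *entry)
{
	xa_lock(xa);
	if (!xa_err(__xa_store(xa, index, entry, GFP_ATOMIC)))
		__xa_set_mark(xa, index, XA_MARK_0);
	xa_unlock(xa);
}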
9b89a035 MW |
1675 | |
1676 | /** | |
1677 | * xa_get_mark() - Inquire whether this mark is set on this entry. | |
1678 | * @xa: XArray. | |
1679 | * @index: Index of entry. | |
1680 | * @mark: Mark number. | |
1681 | * | |
1682 | * This function uses the RCU read lock, so the result may be out of date | |
1683 | * by the time it returns. If you need the result to be stable, use a lock. | |
1684 | * | |
1685 | * Context: Any context. Takes and releases the RCU lock. | |
1686 | * Return: True if the entry at @index has this mark set, false if it doesn't. | |
1687 | */ | |
1688 | bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) | |
1689 | { | |
1690 | XA_STATE(xas, xa, index); | |
1691 | void *entry; | |
1692 | ||
1693 | rcu_read_lock(); | |
1694 | entry = xas_start(&xas); | |
1695 | while (xas_get_mark(&xas, mark)) { | |
1696 | if (!xa_is_node(entry)) | |
1697 | goto found; | |
1698 | entry = xas_descend(&xas, xa_to_node(entry)); | |
1699 | } | |
1700 | rcu_read_unlock(); | |
1701 | return false; | |
1702 | found: | |
1703 | rcu_read_unlock(); | |
1704 | return true; | |
1705 | } | |
1706 | EXPORT_SYMBOL(xa_get_mark); | |
1707 | ||
1708 | /** | |
1709 | * xa_set_mark() - Set this mark on this entry. | |
1710 | * @xa: XArray. | |
1711 | * @index: Index of entry. | |
1712 | * @mark: Mark number. | |
1713 | * | |
804dfaf0 | 1714 | * Attempting to set a mark on a %NULL entry does not succeed. |
9b89a035 MW |
1715 | * |
1716 | * Context: Process context. Takes and releases the xa_lock. | |
1717 | */ | |
1718 | void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) | |
1719 | { | |
1720 | xa_lock(xa); | |
1721 | __xa_set_mark(xa, index, mark); | |
1722 | xa_unlock(xa); | |
1723 | } | |
1724 | EXPORT_SYMBOL(xa_set_mark); | |
1725 | ||
1726 | /** | |
1727 | * xa_clear_mark() - Clear this mark on this entry. | |
1728 | * @xa: XArray. | |
1729 | * @index: Index of entry. | |
1730 | * @mark: Mark number. | |
1731 | * | |
1732 | * Clearing a mark always succeeds. | |
1733 | * | |
1734 | * Context: Process context. Takes and releases the xa_lock. | |
1735 | */ | |
1736 | void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) | |
1737 | { | |
1738 | xa_lock(xa); | |
1739 | __xa_clear_mark(xa, index, mark); | |
1740 | xa_unlock(xa); | |
1741 | } | |
1742 | EXPORT_SYMBOL(xa_clear_mark); | |
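/*
 * Minimal usage sketch (editorial illustration; the function name is
 * hypothetical).  The self-locking wrappers tag an entry, test the tag,
 * and clear it again.
 */
static void __maybe_unused xa_mark_example(struct xarray *xa,
		unsigned long index)
{
	xa_set_mark(xa, index, XA_MARK_0);	/* no-op if the entry is NULL */
	if (xa_get_mark(xa, index, XA_MARK_0))
		pr_debug("index %lu is marked\n", index);
	xa_clear_mark(xa, index, XA_MARK_0);
}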
1743 | ||
b803b428 MW |
1744 | /** |
1745 | * xa_find() - Search the XArray for an entry. | |
1746 | * @xa: XArray. | |
1747 | * @indexp: Pointer to an index. | |
1748 | * @max: Maximum index to search to. | |
1749 | * @filter: Selection criterion. | |
1750 | * | |
1751 | * Finds the entry in @xa which matches the @filter, and has the lowest | |
1752 | * index that is at least @indexp and no more than @max. | |
1753 | * If an entry is found, @indexp is updated to be the index of the entry. | |
1754 | * This function is protected by the RCU read lock, so it may not find | |
1755 | * entries which are being simultaneously added. It will not return an | |
1756 | * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find(). | |
1757 | * | |
1758 | * Context: Any context. Takes and releases the RCU lock. | |
1759 | * Return: The entry, if found, otherwise %NULL. | |
1760 | */ | |
1761 | void *xa_find(struct xarray *xa, unsigned long *indexp, | |
1762 | unsigned long max, xa_mark_t filter) | |
1763 | { | |
1764 | XA_STATE(xas, xa, *indexp); | |
1765 | void *entry; | |
1766 | ||
1767 | rcu_read_lock(); | |
1768 | do { | |
1769 | if ((__force unsigned int)filter < XA_MAX_MARKS) | |
1770 | entry = xas_find_marked(&xas, max, filter); | |
1771 | else | |
1772 | entry = xas_find(&xas, max); | |
1773 | } while (xas_retry(&xas, entry)); | |
1774 | rcu_read_unlock(); | |
1775 | ||
1776 | if (entry) | |
1777 | *indexp = xas.xa_index; | |
1778 | return entry; | |
1779 | } | |
1780 | EXPORT_SYMBOL(xa_find); | |
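/*
 * Minimal usage sketch (editorial illustration; the function name and the
 * index bounds are arbitrary).  It finds the lowest present entry at or
 * above a starting index; passing an XA_MARK_* value instead of
 * XA_PRESENT would restrict the search to marked entries.
 */
static void __maybe_unused xa_find_example(struct xarray *xa)
{
	unsigned long index = 10;
	void *entry;

	/* Lowest present entry at index 10 or above, up to index 1000. */
	entry = xa_find(xa, &index, 1000, XA_PRESENT);
	if (entry)
		pr_debug("first entry from 10 is at %lu\n", index);
}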
1781 | ||
1782 | /** | |
1783 | * xa_find_after() - Search the XArray for a present entry. | |
1784 | * @xa: XArray. | |
1785 | * @indexp: Pointer to an index. | |
1786 | * @max: Maximum index to search to. | |
1787 | * @filter: Selection criterion. | |
1788 | * | |
1789 | * Finds the entry in @xa which matches the @filter and has the lowest | |
1790 | * index that is above @indexp and no more than @max. | |
1791 | * If an entry is found, @indexp is updated to be the index of the entry. | |
1792 | * This function is protected by the RCU read lock, so it may miss entries | |
1793 | * which are being simultaneously added. It will not return an | |
1794 | * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find(). | |
1795 | * | |
1796 | * Context: Any context. Takes and releases the RCU lock. | |
1797 | * Return: The entry, if found, otherwise %NULL. | |
1798 | */ | |
1799 | void *xa_find_after(struct xarray *xa, unsigned long *indexp, | |
1800 | unsigned long max, xa_mark_t filter) | |
1801 | { | |
1802 | XA_STATE(xas, xa, *indexp + 1); | |
1803 | void *entry; | |
1804 | ||
1805 | rcu_read_lock(); | |
1806 | for (;;) { | |
1807 | if ((__force unsigned int)filter < XA_MAX_MARKS) | |
1808 | entry = xas_find_marked(&xas, max, filter); | |
1809 | else | |
1810 | entry = xas_find(&xas, max); | |
8229706e MW |
1811 | if (xas.xa_node == XAS_BOUNDS) |
1812 | break; | |
b803b428 MW |
1813 | if (xas.xa_shift) { |
1814 | if (xas.xa_index & ((1UL << xas.xa_shift) - 1)) | |
1815 | continue; | |
1816 | } else { | |
1817 | if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK)) | |
1818 | continue; | |
1819 | } | |
1820 | if (!xas_retry(&xas, entry)) | |
1821 | break; | |
1822 | } | |
1823 | rcu_read_unlock(); | |
1824 | ||
1825 | if (entry) | |
1826 | *indexp = xas.xa_index; | |
1827 | return entry; | |
1828 | } | |
1829 | EXPORT_SYMBOL(xa_find_after); | |
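/*
 * Minimal usage sketch (editorial illustration; the function name is
 * hypothetical).  xa_find() locates the first entry and xa_find_after()
 * advances strictly past the previous index, giving a simple iteration
 * over every present entry.
 */
static void __maybe_unused xa_iterate_example(struct xarray *xa)
{
	unsigned long index = 0;
	void *entry;

	entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);
	while (entry) {
		pr_debug("index %lu -> %px\n", index, entry);
		entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	}
}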
1830 | ||
80a0a1a9 MW |
1831 | static unsigned int xas_extract_present(struct xa_state *xas, void **dst, |
1832 | unsigned long max, unsigned int n) | |
1833 | { | |
1834 | void *entry; | |
1835 | unsigned int i = 0; | |
1836 | ||
1837 | rcu_read_lock(); | |
1838 | xas_for_each(xas, entry, max) { | |
1839 | if (xas_retry(xas, entry)) | |
1840 | continue; | |
1841 | dst[i++] = entry; | |
1842 | if (i == n) | |
1843 | break; | |
1844 | } | |
1845 | rcu_read_unlock(); | |
1846 | ||
1847 | return i; | |
1848 | } | |
1849 | ||
1850 | static unsigned int xas_extract_marked(struct xa_state *xas, void **dst, | |
1851 | unsigned long max, unsigned int n, xa_mark_t mark) | |
1852 | { | |
1853 | void *entry; | |
1854 | unsigned int i = 0; | |
1855 | ||
1856 | rcu_read_lock(); | |
1857 | xas_for_each_marked(xas, entry, max, mark) { | |
1858 | if (xas_retry(xas, entry)) | |
1859 | continue; | |
1860 | dst[i++] = entry; | |
1861 | if (i == n) | |
1862 | break; | |
1863 | } | |
1864 | rcu_read_unlock(); | |
1865 | ||
1866 | return i; | |
1867 | } | |
1868 | ||
1869 | /** | |
1870 | * xa_extract() - Copy selected entries from the XArray into a normal array. | |
1871 | * @xa: The source XArray to copy from. | |
1872 | * @dst: The buffer to copy entries into. | |
1873 | * @start: The first index in the XArray eligible to be selected. | |
1874 | * @max: The last index in the XArray eligible to be selected. | |
1875 | * @n: The maximum number of entries to copy. | |
1876 | * @filter: Selection criterion. | |
1877 | * | |
1878 | * Copies up to @n entries that match @filter from the XArray. The | |
1879 | * copied entries will have indices between @start and @max, inclusive. | |
1880 | * | |
1881 | * The @filter may be an XArray mark value, in which case entries which are | |
1882 | * marked with that mark will be copied. It may also be %XA_PRESENT, in | |
804dfaf0 | 1883 | * which case all entries which are not %NULL will be copied. |
80a0a1a9 MW |
1884 | * |
1885 | * The entries returned may not represent a snapshot of the XArray at a | |
1886 | * moment in time. For example, if another thread stores to index 5, then | |
1887 | * index 10, calling xa_extract() may return the old contents of index 5 | |
1888 | * and the new contents of index 10. Indices not modified while this | |
1889 | * function is running will not be skipped. | |
1890 | * | |
1891 | * If you need stronger guarantees, holding the xa_lock across calls to this | |
1892 | * function will prevent concurrent modification. | |
1893 | * | |
1894 | * Context: Any context. Takes and releases the RCU lock. | |
1895 | * Return: The number of entries copied. | |
1896 | */ | |
1897 | unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start, | |
1898 | unsigned long max, unsigned int n, xa_mark_t filter) | |
1899 | { | |
1900 | XA_STATE(xas, xa, start); | |
1901 | ||
1902 | if (!n) | |
1903 | return 0; | |
1904 | ||
1905 | if ((__force unsigned int)filter < XA_MAX_MARKS) | |
1906 | return xas_extract_marked(&xas, dst, max, n, filter); | |
1907 | return xas_extract_present(&xas, dst, max, n); | |
1908 | } | |
1909 | EXPORT_SYMBOL(xa_extract); | |
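/*
 * Minimal usage sketch (editorial illustration; the function name, buffer
 * size and index range are arbitrary).  It copies up to 16 entries marked
 * with XA_MARK_0 from the first 1024 indices into a local buffer.
 */
static unsigned int __maybe_unused xa_extract_example(struct xarray *xa)
{
	void *batch[16];
	unsigned int count;

	count = xa_extract(xa, batch, 0, 1023, ARRAY_SIZE(batch), XA_MARK_0);
	pr_debug("copied %u marked entries\n", count);
	return count;
}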
1910 | ||
687149fc MW |
1911 | /** |
1912 | * xa_destroy() - Free all internal data structures. | |
1913 | * @xa: XArray. | |
1914 | * | |
1915 | * After calling this function, the XArray is empty and has freed all memory | |
1916 | * allocated for its internal data structures. You are responsible for | |
1917 | * freeing the objects referenced by the XArray. | |
1918 | * | |
1919 | * Context: Any context. Takes and releases the xa_lock, interrupt-safe. | |
1920 | */ | |
1921 | void xa_destroy(struct xarray *xa) | |
1922 | { | |
1923 | XA_STATE(xas, xa, 0); | |
1924 | unsigned long flags; | |
1925 | void *entry; | |
1926 | ||
1927 | xas.xa_node = NULL; | |
1928 | xas_lock_irqsave(&xas, flags); | |
1929 | entry = xa_head_locked(xa); | |
1930 | RCU_INIT_POINTER(xa->xa_head, NULL); | |
1931 | xas_init_marks(&xas); | |
1932 | /* lockdep checks we're still holding the lock in xas_free_nodes() */ | |
1933 | if (xa_is_node(entry)) | |
1934 | xas_free_nodes(&xas, xa_to_node(entry)); | |
1935 | xas_unlock_irqrestore(&xas, flags); | |
1936 | } | |
1937 | EXPORT_SYMBOL(xa_destroy); | |
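/*
 * Minimal usage sketch (editorial illustration; the function name is
 * hypothetical).  xa_destroy() only frees the internal nodes, so the
 * objects the entries point to must be released first (here with kfree(),
 * assuming they were kmalloc'd pointers rather than value entries).
 */
static void __maybe_unused xa_destroy_example(struct xarray *xa)
{
	unsigned long index = 0;
	void *entry;

	entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);
	while (entry) {
		kfree(entry);
		entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	}
	xa_destroy(xa);
}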
1938 | ||
ad3d6c72 MW |
1939 | #ifdef XA_DEBUG |
1940 | void xa_dump_node(const struct xa_node *node) | |
1941 | { | |
1942 | unsigned i, j; | |
1943 | ||
1944 | if (!node) | |
1945 | return; | |
1946 | if ((unsigned long)node & 3) { | |
1947 | pr_cont("node %px\n", node); | |
1948 | return; | |
1949 | } | |
1950 | ||
1951 | pr_cont("node %px %s %d parent %px shift %d count %d values %d " | |
1952 | "array %px list %px %px marks", | |
1953 | node, node->parent ? "offset" : "max", node->offset, | |
1954 | node->parent, node->shift, node->count, node->nr_values, | |
1955 | node->array, node->private_list.prev, node->private_list.next); | |
1956 | for (i = 0; i < XA_MAX_MARKS; i++) | |
1957 | for (j = 0; j < XA_MARK_LONGS; j++) | |
1958 | pr_cont(" %lx", node->marks[i][j]); | |
1959 | pr_cont("\n"); | |
1960 | } | |
1961 | ||
1962 | void xa_dump_index(unsigned long index, unsigned int shift) | |
1963 | { | |
1964 | if (!shift) | |
1965 | pr_info("%lu: ", index); | |
1966 | else if (shift >= BITS_PER_LONG) | |
1967 | pr_info("0-%lu: ", ~0UL); | |
1968 | else | |
1969 | pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1)); | |
1970 | } | |
1971 | ||
1972 | void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift) | |
1973 | { | |
1974 | if (!entry) | |
1975 | return; | |
1976 | ||
1977 | xa_dump_index(index, shift); | |
1978 | ||
1979 | if (xa_is_node(entry)) { | |
1980 | if (shift == 0) { | |
1981 | pr_cont("%px\n", entry); | |
1982 | } else { | |
1983 | unsigned long i; | |
1984 | struct xa_node *node = xa_to_node(entry); | |
1985 | xa_dump_node(node); | |
1986 | for (i = 0; i < XA_CHUNK_SIZE; i++) | |
1987 | xa_dump_entry(node->slots[i], | |
1988 | index + (i << node->shift), node->shift); | |
1989 | } | |
1990 | } else if (xa_is_value(entry)) | |
1991 | pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry), | |
1992 | xa_to_value(entry), entry); | |
1993 | else if (!xa_is_internal(entry)) | |
1994 | pr_cont("%px\n", entry); | |
1995 | else if (xa_is_retry(entry)) | |
1996 | pr_cont("retry (%ld)\n", xa_to_internal(entry)); | |
1997 | else if (xa_is_sibling(entry)) | |
1998 | pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry)); | |
9f14d4f1 MW |
1999 | else if (xa_is_zero(entry)) |
2000 | pr_cont("zero (%ld)\n", xa_to_internal(entry)); | |
ad3d6c72 MW |
2001 | else |
2002 | pr_cont("UNKNOWN ENTRY (%px)\n", entry); | |
2003 | } | |
2004 | ||
2005 | void xa_dump(const struct xarray *xa) | |
2006 | { | |
2007 | void *entry = xa->xa_head; | |
2008 | unsigned int shift = 0; | |
2009 | ||
2010 | pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry, | |
9b89a035 MW |
2011 | xa->xa_flags, xa_marked(xa, XA_MARK_0), |
2012 | xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2)); | |
ad3d6c72 MW |
2013 | if (xa_is_node(entry)) |
2014 | shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT; | |
2015 | xa_dump_entry(entry, 0, shift); | |
2016 | } | |
2017 | #endif |