/*
 * Copyright (C) 2011 STRATO AG
 * written by Arne Jansen <[email protected]>
 * Distributed under the GNU GPL license version 2.
 */

#include <linux/slab.h>
#include "ulist.h"
#include "ctree.h"

/*
 * ulist is a generic data structure to hold a collection of unique u64
 * values. The only operations it supports are adding to the list and
 * enumerating it.
 * It is possible to store an auxiliary value along with the key.
 *
 * A sample usage for ulists is the enumeration of directed graphs without
 * visiting a node twice. The pseudo-code could look like this:
 *
 * ulist = ulist_alloc();
 * ulist_add(ulist, root);
 * ULIST_ITER_INIT(&uiter);
 *
 * while ((elem = ulist_next(ulist, &uiter))) {
 *	for (all child nodes n in elem)
 *		ulist_add(ulist, n);
 *	do something useful with the node;
 * }
 * ulist_free(ulist);
 *
 * This assumes the graph nodes are addressable by u64. This stems from the
 * usage for tree enumeration in btrfs, where the logical addresses are
 * 64 bit.
 *
 * It is also useful for tree enumeration which could be done elegantly
 * recursively, but is not possible due to kernel stack limitations. The
 * loop would be similar to the above.
 */

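/*
 * A minimal, concrete sketch of the pattern above in a hypothetical
 * ulist_usage_sketch() helper: deduplicate a fixed set of u64 values and
 * enumerate the result. It uses only the ulist API declared in ulist.h
 * and assumes a process context where GFP_KERNEL is safe.
 */
static void __maybe_unused ulist_usage_sketch(void)
{
	static const u64 vals[] = { 10, 20, 10, 30, 20 };
	struct ulist *ulist;
	struct ulist_iterator uiter;
	struct ulist_node *elem;
	int i;

	ulist = ulist_alloc(GFP_KERNEL);
	if (!ulist)
		return;
	for (i = 0; i < ARRAY_SIZE(vals); i++)
		ulist_add(ulist, vals[i], 0, GFP_KERNEL);
	/* only 10, 20 and 30 remain; duplicate adds returned 0 */
	ULIST_ITER_INIT(&uiter);
	while ((elem = ulist_next(ulist, &uiter)))
		pr_debug("val=%llu aux=%llu\n", elem->val, elem->aux);
	ulist_free(ulist);	/* frees all nodes and the base struct */
}
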
/**
 * ulist_init - freshly initialize a ulist
 * @ulist: the ulist to initialize
 *
 * Note: don't use this function to init an already used ulist, use
 * ulist_reinit instead.
 */
void ulist_init(struct ulist *ulist)
{
	INIT_LIST_HEAD(&ulist->nodes);
	ulist->root = RB_ROOT;
	ulist->nnodes = 0;
}

/**
 * ulist_release - free up additionally allocated memory for the ulist
 * @ulist: the ulist from which to free the additional memory
 *
 * This is useful in cases where the base 'struct ulist' has been statically
 * allocated.
 */
void ulist_release(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_node *next;

	list_for_each_entry_safe(node, next, &ulist->nodes, list) {
		kfree(node);
	}
	ulist->root = RB_ROOT;
	INIT_LIST_HEAD(&ulist->nodes);
}
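
/*
 * Illustrative sketch of the statically allocated case mentioned above,
 * in a hypothetical ulist_on_stack_sketch() helper: the base struct lives
 * on the stack, so the pairing is ulist_init()/ulist_release() rather
 * than ulist_alloc()/ulist_free().
 */
static void __maybe_unused ulist_on_stack_sketch(void)
{
	struct ulist ulist;

	ulist_init(&ulist);
	ulist_add(&ulist, 42, 0, GFP_KERNEL);
	/* ... use the ulist ... */
	ulist_release(&ulist);	/* frees the nodes, not the base struct */
}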

/**
 * ulist_reinit - prepare a ulist for reuse
 * @ulist: ulist to be reused
 *
 * Free up all additional memory allocated for the list elements and reinit
 * the ulist.
 */
void ulist_reinit(struct ulist *ulist)
{
	ulist_release(ulist);
	ulist_init(ulist);
}

/**
 * ulist_alloc - dynamically allocate a ulist
 * @gfp_mask: allocation flags to use for the base allocation
 *
 * The allocated ulist will be returned in an initialized state.
 */
struct ulist *ulist_alloc(gfp_t gfp_mask)
{
	struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);

	if (!ulist)
		return NULL;

	ulist_init(ulist);

	return ulist;
}

/**
 * ulist_free - free dynamically allocated ulist
 * @ulist: ulist to free
 *
 * It is not necessary to call ulist_release before.
 */
void ulist_free(struct ulist *ulist)
{
	if (!ulist)
		return;
	ulist_release(ulist);
	kfree(ulist);
}

static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
{
	struct rb_node *n = ulist->root.rb_node;
	struct ulist_node *u = NULL;

	while (n) {
		u = rb_entry(n, struct ulist_node, rb_node);
		if (u->val < val)
			n = n->rb_right;
		else if (u->val > val)
			n = n->rb_left;
		else
			return u;
	}
	return NULL;
}

static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node)
{
	rb_erase(&node->rb_node, &ulist->root);
	list_del(&node->list);
	kfree(node);
	BUG_ON(ulist->nnodes == 0);
	ulist->nnodes--;
}

static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins)
{
	struct rb_node **p = &ulist->root.rb_node;
	struct rb_node *parent = NULL;
	struct ulist_node *cur = NULL;

	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct ulist_node, rb_node);

		if (cur->val < ins->val)
			p = &(*p)->rb_right;
		else if (cur->val > ins->val)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}
	rb_link_node(&ins->rb_node, parent, p);
	rb_insert_color(&ins->rb_node, &ulist->root);
	return 0;
}

/**
 * ulist_add - add an element to the ulist
 * @ulist: ulist to add the element to
 * @val: value to add to ulist
 * @aux: auxiliary value to store along with val
 * @gfp_mask: flags to use for allocation
 *
 * Note: locking must be provided by the caller. In case of rwlocks write
 * locking is needed.
 *
 * Add an element to a ulist. The @val will only be added if it doesn't
 * already exist. If it is added, the auxiliary value @aux is stored along with
 * it. In case @val already exists in the ulist, @aux is ignored, even if
 * it differs from the already stored value.
 *
 * ulist_add returns 0 if @val already exists in ulist and 1 if @val has been
 * inserted.
 * In case of allocation failure -ENOMEM is returned and the ulist stays
 * unaltered.
 */
int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
{
	return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
}

int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
		    u64 *old_aux, gfp_t gfp_mask)
{
	int ret;
	struct ulist_node *node;

	node = ulist_rbtree_search(ulist, val);
	if (node) {
		if (old_aux)
			*old_aux = node->aux;
		return 0;
	}
	node = kmalloc(sizeof(*node), gfp_mask);
	if (!node)
		return -ENOMEM;

	node->val = val;
	node->aux = aux;

	ret = ulist_rbtree_insert(ulist, node);
	ASSERT(!ret);
	list_add_tail(&node->list, &ulist->nodes);
	ulist->nnodes++;

	return 1;
}
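
/*
 * Illustrative sketch of the return-value contract, in a hypothetical
 * ulist_add_merge_sketch() helper: 1 for a fresh insertion, 0 when @val
 * already exists (with the stored aux reported through @old_aux), and
 * -ENOMEM on allocation failure. GFP_KERNEL context is assumed.
 */
static void __maybe_unused ulist_add_merge_sketch(struct ulist *ulist)
{
	u64 old_aux = 0;
	int ret;

	ret = ulist_add(ulist, 100, 1, GFP_KERNEL);	/* 1: newly inserted */
	ret = ulist_add(ulist, 100, 2, GFP_KERNEL);	/* 0: exists, aux stays 1 */
	ret = ulist_add_merge(ulist, 100, 3, &old_aux, GFP_KERNEL);
	/* ret == 0 and old_aux == 1: the originally stored aux is reported */
}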

/*
 * ulist_del - delete one node from ulist
 * @ulist: ulist to remove node from
 * @val: value to delete
 * @aux: aux to delete
 *
 * The deletion will only be done when *BOTH* val and aux match.
 * Return 0 for a successful delete.
 * Return > 0 if the node was not found.
 */
int ulist_del(struct ulist *ulist, u64 val, u64 aux)
{
	struct ulist_node *node;

	node = ulist_rbtree_search(ulist, val);
	/* Not found */
	if (!node)
		return 1;

	if (node->aux != aux)
		return 1;

	/* Found and delete */
	ulist_rbtree_erase(ulist, node);
	return 0;
}
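
/*
 * Illustrative sketch of the matching rule, in a hypothetical
 * ulist_del_sketch() helper: ulist_del only erases a node when both @val
 * and @aux match what was stored; a mismatched aux leaves it in place.
 */
static void __maybe_unused ulist_del_sketch(struct ulist *ulist)
{
	ulist_add(ulist, 7, 100, GFP_KERNEL);
	WARN_ON(ulist_del(ulist, 7, 999) != 1);	/* wrong aux: not found */
	WARN_ON(ulist_del(ulist, 7, 100) != 0);	/* both match: erased */
}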

/**
 * ulist_next - iterate ulist
 * @ulist: ulist to iterate
 * @uiter: iterator variable, initialized with ULIST_ITER_INIT(&iterator)
 *
 * Note: locking must be provided by the caller. In case of rwlocks only read
 * locking is needed.
 *
 * This function is used to iterate a ulist.
 * It returns the next element from the ulist or %NULL when the
 * end is reached. No guarantee is made with respect to the order in which
 * the elements are returned. They might neither be returned in order of
 * addition nor in ascending order.
 * It is allowed to call ulist_add during an enumeration. Newly added items
 * are guaranteed to show up in the running enumeration.
 */
struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
{
	struct ulist_node *node;

	if (list_empty(&ulist->nodes))
		return NULL;
	if (uiter->cur_list && uiter->cur_list->next == &ulist->nodes)
		return NULL;
	if (uiter->cur_list) {
		uiter->cur_list = uiter->cur_list->next;
	} else {
		uiter->cur_list = ulist->nodes.next;
	}
	node = list_entry(uiter->cur_list, struct ulist_node, list);
	return node;
}
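
/*
 * Illustrative sketch of the guarantee documented above, in a hypothetical
 * ulist_next_sketch() helper: values added while an enumeration is running
 * are picked up by that same enumeration, because ulist_next() walks the
 * list that ulist_add() appends to. GFP_KERNEL context is assumed.
 */
static void __maybe_unused ulist_next_sketch(struct ulist *ulist)
{
	struct ulist_iterator uiter;
	struct ulist_node *elem;

	ULIST_ITER_INIT(&uiter);
	while ((elem = ulist_next(ulist, &uiter))) {
		/* newly added values show up later in this same loop */
		if (elem->val < 8)
			ulist_add(ulist, elem->val + 8, 0, GFP_KERNEL);
	}
}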