// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>

struct nft_rbtree {
	struct rb_root		root;
	rwlock_t		lock;
	seqcount_t		count;
	struct delayed_work	gc_work;
};

struct nft_rbtree_elem {
	struct rb_node		node;
	struct nft_set_ext	ext;
};
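
/*
 * Each interval is represented by a pair of elements: one keyed by the
 * start of the range and one keyed by its end, the latter carrying the
 * NFT_SET_ELEM_INTERVAL_END flag in its extension. Keys are ordered by
 * memcmp() over set->klen bytes, and end elements are always inserted
 * after their corresponding start elements.
 */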

static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
{
	return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
	       (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}

static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
	return !nft_rbtree_interval_end(rbe);
}

static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
			     const struct nft_rbtree_elem *interval)
{
	return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
}

static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
				const u32 *key, const struct nft_set_ext **ext,
				unsigned int seq)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct nft_rbtree_elem *rbe, *interval = NULL;
	u8 genmask = nft_genmask_cur(net);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (interval &&
			    nft_rbtree_equal(set, this, interval) &&
			    nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(interval))
				continue;
			interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
		} else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}
			if (nft_set_elem_expired(&rbe->ext))
				return false;
			if (nft_rbtree_interval_end(rbe)) {
				if (nft_set_is_anonymous(set))
					return false;
				parent = rcu_dereference_raw(parent->rb_left);
				interval = NULL;
				continue;
			}
			*ext = &rbe->ext;
			return true;
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    nft_rbtree_interval_start(interval)) {
		*ext = &interval->ext;
		return true;
	}
	return false;
}
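
/*
 * Datapath lookup: walk the tree locklessly under the seqcount first; if a
 * writer raced with the walk, retry under the read lock, which excludes
 * concurrent insertions and removals.
 */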

static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
			      const u32 *key, const struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	bool ret;

	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return ret;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	read_unlock_bh(&priv->lock);

	return ret;
}

static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
			     const u32 *key, struct nft_rbtree_elem **elem,
			     unsigned int seq, unsigned int flags, u8 genmask)
{
	struct nft_rbtree_elem *rbe, *interval = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (!(flags & NFT_SET_ELEM_INTERVAL_END))
				interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			if (flags & NFT_SET_ELEM_INTERVAL_END)
				interval = rbe;
		} else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}
			if (nft_set_elem_expired(&rbe->ext))
				return false;
			if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
			    (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
			    (flags & NFT_SET_ELEM_INTERVAL_END)) {
				*elem = rbe;
				return true;
			}
			if (nft_rbtree_interval_end(rbe))
				interval = NULL;
			parent = rcu_dereference_raw(parent->rb_left);
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    ((!nft_rbtree_interval_end(interval) &&
	      !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
	     (nft_rbtree_interval_end(interval) &&
	      (flags & NFT_SET_ELEM_INTERVAL_END)))) {
		*elem = interval;
		return true;
	}
	return false;
}

static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
			    const struct nft_set_elem *elem, unsigned int flags)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
	const u32 *key = (const u32 *)&elem->key.val;
	u8 genmask = nft_genmask_cur(net);
	bool ret;

	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return rbe;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (!ret)
		rbe = ERR_PTR(-ENOENT);
	read_unlock_bh(&priv->lock);

	return rbe;
}

static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			       struct nft_rbtree_elem *new,
			       struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	u8 genmask = nft_genmask_next(net);
	struct nft_rbtree_elem *rbe;
	struct rb_node *parent, **p;
	bool overlap = false;
	int d;

	/* Detect overlaps as we descend the tree. Set the flag in these cases:
	 *
	 * a1. _ _ __>|  ?_ _ __|  (insert end before existing end)
	 * a2. _ _ ___|  ?_ _ _>|  (insert end after existing end)
	 * a3. _ _ ___? >|_ _ __|  (insert start before existing end)
	 *
	 * and clear it later on, as we eventually reach the points indicated by
	 * '?' above, in the cases described below. We'll always meet these
	 * later, locally, due to tree ordering, and overlaps for the intervals
	 * that are the closest together are always evaluated last.
	 *
	 * b1. _ _ __>|  !_ _ __|  (insert end before existing start)
	 * b2. _ _ ___|  !_ _ _>|  (insert end after existing start)
	 * b3. _ _ ___! >|_ _ __|  (insert start after existing end)
	 *
	 * Case a3. resolves to b3.:
	 * - if the inserted start element is the leftmost, because the '0'
	 *   element in the tree serves as end element
	 * - otherwise, if an existing end is found. Note that end elements are
	 *   always inserted after corresponding start elements.
	 *
	 * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
	 * in that order.
	 *
	 * The flag is also cleared in two special cases:
	 *
	 * b4. |__ _ _!|<_ _ _   (insert start right before existing end)
	 * b5. |__ _ >|!__ _ _   (insert end right after existing start)
	 *
	 * which always happen as last step and imply that no further
	 * overlapping is possible.
	 */

	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = memcmp(nft_set_ext_key(&rbe->ext),
			   nft_set_ext_key(&new->ext),
			   set->klen);
		if (d < 0) {
			p = &parent->rb_left;

			if (nft_rbtree_interval_start(new)) {
				if (nft_rbtree_interval_end(rbe) &&
				    nft_set_elem_active(&rbe->ext, genmask) &&
				    !nft_set_elem_expired(&rbe->ext))
					overlap = false;
			} else {
				overlap = nft_rbtree_interval_end(rbe) &&
					  nft_set_elem_active(&rbe->ext,
							      genmask) &&
					  !nft_set_elem_expired(&rbe->ext);
			}
		} else if (d > 0) {
			p = &parent->rb_right;

			if (nft_rbtree_interval_end(new)) {
				overlap = nft_rbtree_interval_end(rbe) &&
					  nft_set_elem_active(&rbe->ext,
							      genmask) &&
					  !nft_set_elem_expired(&rbe->ext);
			} else if (nft_rbtree_interval_end(rbe) &&
				   nft_set_elem_active(&rbe->ext, genmask) &&
				   !nft_set_elem_expired(&rbe->ext)) {
				overlap = true;
			}
		} else {
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(new)) {
				p = &parent->rb_left;

				if (nft_set_elem_active(&rbe->ext, genmask) &&
				    !nft_set_elem_expired(&rbe->ext))
					overlap = false;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(new)) {
				p = &parent->rb_right;

				if (nft_set_elem_active(&rbe->ext, genmask) &&
				    !nft_set_elem_expired(&rbe->ext))
					overlap = false;
			} else if (nft_set_elem_active(&rbe->ext, genmask) &&
				   !nft_set_elem_expired(&rbe->ext)) {
				*ext = &rbe->ext;
				return -EEXIST;
			} else {
				p = &parent->rb_left;
			}
		}
	}

	if (overlap)
		return -ENOTEMPTY;

	rb_link_node_rcu(&new->node, parent, p);
	rb_insert_color(&new->node, &priv->root);
	return 0;
}
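
/*
 * Insertions and removals run under the write lock and inside the seqcount
 * write section, so lockless lookups either see a consistent tree or notice
 * the concurrent update and fall back to the locked slow path.
 */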

static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			     const struct nft_set_elem *elem,
			     struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;
	int err;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	err = __nft_rbtree_insert(net, set, rbe, ext);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	return err;
}

static void nft_rbtree_remove(const struct net *net,
			      const struct nft_set *set,
			      const struct nft_set_elem *elem)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	rb_erase(&rbe->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);
}

static void nft_rbtree_activate(const struct net *net,
				const struct nft_set *set,
				const struct nft_set_elem *elem)
{
	struct nft_rbtree_elem *rbe = elem->priv;

	nft_set_elem_change_active(net, set, &rbe->ext);
	nft_set_elem_clear_busy(&rbe->ext);
}

static bool nft_rbtree_flush(const struct net *net,
			     const struct nft_set *set, void *priv)
{
	struct nft_rbtree_elem *rbe = priv;

	if (!nft_set_elem_mark_busy(&rbe->ext) ||
	    !nft_is_active(net, &rbe->ext)) {
		nft_set_elem_change_active(net, set, &rbe->ext);
		return true;
	}
	return false;
}

static void *nft_rbtree_deactivate(const struct net *net,
				   const struct nft_set *set,
				   const struct nft_set_elem *elem)
{
	const struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent = priv->root.rb_node;
	struct nft_rbtree_elem *rbe, *this = elem->priv;
	u8 genmask = nft_genmask_next(net);
	int d;

	while (parent != NULL) {
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
			   set->klen);
		if (d < 0) {
			parent = parent->rb_left;
		} else if (d > 0) {
			parent = parent->rb_right;
		} else {
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(this)) {
				parent = parent->rb_left;
				continue;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(this)) {
				parent = parent->rb_right;
				continue;
			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = parent->rb_left;
				continue;
			}
			nft_rbtree_flush(net, set, rbe);
			return rbe;
		}
	}
	return NULL;
}

static void nft_rbtree_walk(const struct nft_ctx *ctx,
			    struct nft_set *set,
			    struct nft_set_iter *iter)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct nft_set_elem elem;
	struct rb_node *node;

	read_lock_bh(&priv->lock);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (iter->count < iter->skip)
			goto cont;
		if (nft_set_elem_expired(&rbe->ext))
			goto cont;
		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
			goto cont;

		elem.priv = rbe;
		iter->err = iter->fn(ctx, set, iter, &elem);
		if (iter->err < 0) {
			read_unlock_bh(&priv->lock);
			return;
		}
cont:
		iter->count++;
	}
	read_unlock_bh(&priv->lock);
}
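
/*
 * Garbage collection worker: walk the tree, remember the interval end
 * element seen last, and queue expired start elements, together with the
 * remembered end element, to a GC batch; elements already marked busy by a
 * concurrent transaction are skipped. Erasing the start node itself is
 * deferred via rbe_prev so that the iteration can advance past it first.
 */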

static void nft_rbtree_gc(struct work_struct *work)
{
	struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
	struct nft_set_gc_batch *gcb = NULL;
	struct nft_rbtree *priv;
	struct rb_node *node;
	struct nft_set *set;

	priv = container_of(work, struct nft_rbtree, gc_work.work);
	set  = nft_set_container_of(priv);

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (nft_rbtree_interval_end(rbe)) {
			rbe_end = rbe;
			continue;
		}
		if (!nft_set_elem_expired(&rbe->ext))
			continue;
		if (nft_set_elem_mark_busy(&rbe->ext))
			continue;

		if (rbe_prev) {
			rb_erase(&rbe_prev->node, &priv->root);
			rbe_prev = NULL;
		}
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (!gcb)
			break;

		atomic_dec(&set->nelems);
		nft_set_gc_batch_add(gcb, rbe);
		rbe_prev = rbe;

		if (rbe_end) {
			atomic_dec(&set->nelems);
			nft_set_gc_batch_add(gcb, rbe_end);
			rb_erase(&rbe_end->node, &priv->root);
			rbe_end = NULL;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
	if (rbe_prev)
		rb_erase(&rbe_prev->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	nft_set_gc_batch_complete(gcb);

	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
			   nft_set_gc_interval(set));
}

static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
			       const struct nft_set_desc *desc)
{
	return sizeof(struct nft_rbtree);
}

static int nft_rbtree_init(const struct nft_set *set,
			   const struct nft_set_desc *desc,
			   const struct nlattr * const nla[])
{
	struct nft_rbtree *priv = nft_set_priv(set);

	rwlock_init(&priv->lock);
	seqcount_init(&priv->count);
	priv->root = RB_ROOT;

	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
	if (set->flags & NFT_SET_TIMEOUT)
		queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
				   nft_set_gc_interval(set));

	return 0;
}

static void nft_rbtree_destroy(const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	cancel_delayed_work_sync(&priv->gc_work);
	rcu_barrier();
	while ((node = priv->root.rb_node) != NULL) {
		rb_erase(node, &priv->root);
		rbe = rb_entry(node, struct nft_rbtree_elem, node);
		nft_set_elem_destroy(set, rbe, true);
	}
}

static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
				struct nft_set_estimate *est)
{
	if (desc->field_count > 1)
		return false;

	if (desc->size)
		est->size = sizeof(struct nft_rbtree) +
			    desc->size * sizeof(struct nft_rbtree_elem);
	else
		est->size = ~0;

	est->lookup = NFT_SET_CLASS_O_LOG_N;
	est->space  = NFT_SET_CLASS_O_N;
	return true;
}
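
/*
 * The rbtree backend only handles single-field keys (see the field_count
 * check in nft_rbtree_estimate()) and advertises O(log N) lookup complexity
 * and O(N) memory use to the set core.
 */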

const struct nft_set_type nft_set_rbtree_type = {
	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
	.ops		= {
		.privsize	= nft_rbtree_privsize,
		.elemsize	= offsetof(struct nft_rbtree_elem, ext),
		.estimate	= nft_rbtree_estimate,
		.init		= nft_rbtree_init,
		.destroy	= nft_rbtree_destroy,
		.insert		= nft_rbtree_insert,
		.remove		= nft_rbtree_remove,
		.deactivate	= nft_rbtree_deactivate,
		.flush		= nft_rbtree_flush,
		.activate	= nft_rbtree_activate,
		.lookup		= nft_rbtree_lookup,
		.walk		= nft_rbtree_walk,
		.get		= nft_rbtree_get,
	},
};