// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>

struct nft_rbtree {
	struct rb_root		root;
	rwlock_t		lock;
	seqcount_rwlock_t	count;
	struct delayed_work	gc_work;
};

struct nft_rbtree_elem {
	struct rb_node		node;
	struct nft_set_ext	ext;
};
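
/* Intervals are stored as pairs of elements: a start element plus an end
 * element carrying NFT_SET_ELEM_INTERVAL_END in its extension flags.
 * Nodes are kept in descending key order, so an in-order walk starting at
 * rb_first() visits the end element of an interval before its start
 * element.
 */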

static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
{
	return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
	       (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}

static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
	return !nft_rbtree_interval_end(rbe);
}

static int nft_rbtree_cmp(const struct nft_set *set,
			  const struct nft_rbtree_elem *e1,
			  const struct nft_rbtree_elem *e2)
{
	return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
		      set->klen);
}
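
/* Datapath lookup, called from the packet path. The walk runs locklessly
 * under RCU; read_seqcount_retry() detects a concurrent writer, in which
 * case the caller falls back to a walk under the read lock.
 */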
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
				const u32 *key, const struct nft_set_ext **ext,
				unsigned int seq)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct nft_rbtree_elem *rbe, *interval = NULL;
	u8 genmask = nft_genmask_cur(net);
	const struct rb_node *parent;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (interval &&
			    !nft_rbtree_cmp(set, rbe, interval) &&
			    nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(interval))
				continue;
			interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
		} else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_set_elem_expired(&rbe->ext))
				return false;

			if (nft_rbtree_interval_end(rbe)) {
				if (nft_set_is_anonymous(set))
					return false;
				parent = rcu_dereference_raw(parent->rb_left);
				interval = NULL;
				continue;
			}

			*ext = &rbe->ext;
			return true;
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    nft_rbtree_interval_start(interval)) {
		*ext = &interval->ext;
		return true;
	}

	return false;
}
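
/* Fast path wrapper: attempt a lockless lookup first; only when it fails
 * and the seqcount shows a concurrent update, retry under the read lock
 * for a stable view of the tree.
 */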
INDIRECT_CALLABLE_SCOPE
bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
		       const u32 *key, const struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	bool ret;

	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return ret;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	read_unlock_bh(&priv->lock);

	return ret;
}
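
/* Control plane lookup: unlike the datapath variant, this honours
 * NFT_SET_ELEM_INTERVAL_END in @flags so either endpoint of an interval
 * can be retrieved, and it hands back the element itself via @elem.
 */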
static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
			     const u32 *key, struct nft_rbtree_elem **elem,
			     unsigned int seq, unsigned int flags, u8 genmask)
{
	struct nft_rbtree_elem *rbe, *interval = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (!(flags & NFT_SET_ELEM_INTERVAL_END))
				interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			if (flags & NFT_SET_ELEM_INTERVAL_END)
				interval = rbe;
		} else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_set_elem_expired(&rbe->ext))
				return false;

			if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
			    (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
			    (flags & NFT_SET_ELEM_INTERVAL_END)) {
				*elem = rbe;
				return true;
			}

			if (nft_rbtree_interval_end(rbe))
				interval = NULL;

			parent = rcu_dereference_raw(parent->rb_left);
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    ((!nft_rbtree_interval_end(interval) &&
	      !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
	     (nft_rbtree_interval_end(interval) &&
	      (flags & NFT_SET_ELEM_INTERVAL_END)))) {
		*elem = interval;
		return true;
	}

	return false;
}
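
/* Same two-phase scheme as nft_rbtree_lookup(): lockless attempt first,
 * retry under the read lock if the seqcount indicates a race.
 */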
static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
			    const struct nft_set_elem *elem, unsigned int flags)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
	const u32 *key = (const u32 *)&elem->key.val;
	u8 genmask = nft_genmask_cur(net);
	bool ret;

	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return rbe;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (!ret)
		rbe = ERR_PTR(-ENOENT);
	read_unlock_bh(&priv->lock);

	return rbe;
}
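
/* Remove an expired element found while walking the tree on insertion,
 * along with the end element preceding it in the walk, so that it cannot
 * trigger bogus overlap reports. Called with the write lock held.
 */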
static int nft_rbtree_gc_elem(const struct nft_set *__set,
			      struct nft_rbtree *priv,
			      struct nft_rbtree_elem *rbe)
{
	struct nft_set *set = (struct nft_set *)__set;
	struct rb_node *prev = rb_prev(&rbe->node);
	struct nft_rbtree_elem *rbe_prev;
	struct nft_set_gc_batch *gcb;

	gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
	if (!gcb)
		return -ENOMEM;

	/* search for the expired end interval coming before this element. */
	while (prev != NULL) {
		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
		if (nft_rbtree_interval_end(rbe_prev))
			break;

		prev = rb_prev(prev);
	}

	/* only erase the end element if one was actually found; guards
	 * against dereferencing a null pointer when there is no preceding
	 * end interval.
	 */
	if (prev != NULL) {
		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
		rb_erase(&rbe_prev->node, &priv->root);
		atomic_dec(&set->nelems);
	}

	rb_erase(&rbe->node, &priv->root);
	atomic_dec(&set->nelems);

	nft_set_gc_batch_add(gcb, rbe);
	nft_set_gc_batch_complete(gcb);

	return 0;
}

static bool nft_rbtree_update_first(const struct nft_set *set,
				    struct nft_rbtree_elem *rbe,
				    struct rb_node *first)
{
	struct nft_rbtree_elem *first_elem;

	first_elem = rb_entry(first, struct nft_rbtree_elem, node);
	/* this element is closest to where the new element is to be inserted:
	 * update the first element for the node list path.
	 */
	if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
		return true;

	return false;
}
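
/* Insertion with overlap detection: descend the tree to find a starting
 * point, then walk the ordered elements to record the closest less/equal
 * (rbe_le) and greater/equal (rbe_ge) elements. Full overlaps are
 * reported as -EEXIST, partial overlaps as -ENOTEMPTY; otherwise the new
 * node is linked in at the position dictated by its key.
 */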
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			       struct nft_rbtree_elem *new,
			       struct nft_set_ext **ext)
{
	struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
	struct rb_node *node, *parent, **p, *first = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	u8 genmask = nft_genmask_next(net);
	int d, err;

	/* Descend the tree to search for an existing element greater than the
	 * key value to insert. This is the first element to use when walking
	 * the ordered list of elements to detect possible overlaps.
	 */
	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = nft_rbtree_cmp(set, rbe, new);

		if (d < 0) {
			p = &parent->rb_left;
		} else if (d > 0) {
			if (!first ||
			    nft_rbtree_update_first(set, rbe, first))
				first = &rbe->node;

			p = &parent->rb_right;
		} else {
			if (nft_rbtree_interval_end(rbe))
				p = &parent->rb_left;
			else
				p = &parent->rb_right;
		}
	}

	if (!first)
		first = rb_first(&priv->root);

	/* Detect overlap by going through the list of valid tree nodes.
	 * Values stored in the tree are in reversed order, starting from
	 * highest to lowest value.
	 */
	for (node = first; node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (!nft_set_elem_active(&rbe->ext, genmask))
			continue;

		/* perform garbage collection to avoid bogus overlap reports. */
		if (nft_set_elem_expired(&rbe->ext)) {
			err = nft_rbtree_gc_elem(set, priv, rbe);
			if (err < 0)
				return err;

			continue;
		}

		d = nft_rbtree_cmp(set, rbe, new);
		if (d == 0) {
			/* Matching end element: no need to look for an
			 * overlapping greater or equal element.
			 */
			if (nft_rbtree_interval_end(rbe)) {
				rbe_le = rbe;
				break;
			}

			/* first element that is greater or equal to key value. */
			if (!rbe_ge) {
				rbe_ge = rbe;
				continue;
			}

			/* this is a closer greater or equal element, update it. */
			if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
				rbe_ge = rbe;
				continue;
			}

			/* element is equal to key value, make sure flags are
			 * the same, an existing greater or equal start element
			 * must not be replaced by a greater or equal end
			 * element.
			 */
			if ((nft_rbtree_interval_start(new) &&
			     nft_rbtree_interval_start(rbe_ge)) ||
			    (nft_rbtree_interval_end(new) &&
			     nft_rbtree_interval_end(rbe_ge))) {
				rbe_ge = rbe;
				continue;
			}
		} else if (d > 0) {
			/* annotate element greater than the new element. */
			rbe_ge = rbe;
			continue;
		} else if (d < 0) {
			/* annotate element less than the new element. */
			rbe_le = rbe;
			break;
		}
	}

	/* - new start element matching existing start element: full overlap
	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
	 */
	if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
	    nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
		*ext = &rbe_ge->ext;
		return -EEXIST;
	}

	/* - new end element matching existing end element: full overlap
	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
	 */
	if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
	    nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
		*ext = &rbe_le->ext;
		return -EEXIST;
	}

	/* - new start element with existing closest, less or equal key value
	 *   being a start element: partial overlap, reported as -ENOTEMPTY.
	 *   Anonymous sets allow for two consecutive start elements since they
	 *   are constant, skip them to avoid bogus overlap reports.
	 */
	if (!nft_set_is_anonymous(set) && rbe_le &&
	    nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
		return -ENOTEMPTY;

	/* - new end element with existing closest, less or equal key value
	 *   being an end element: partial overlap, reported as -ENOTEMPTY.
	 */
	if (rbe_le &&
	    nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
		return -ENOTEMPTY;

	/* - new end element with existing closest, greater or equal key value
	 *   being an end element: partial overlap, reported as -ENOTEMPTY.
	 */
	if (rbe_ge &&
	    nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
		return -ENOTEMPTY;

	/* Accepted element: pick insertion point depending on key value */
	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = nft_rbtree_cmp(set, rbe, new);

		if (d < 0)
			p = &parent->rb_left;
		else if (d > 0)
			p = &parent->rb_right;
		else if (nft_rbtree_interval_end(rbe))
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node_rcu(&new->node, parent, p);
	rb_insert_color(&new->node, &priv->root);
	return 0;
}
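
/* Writers serialize on the rwlock; bumping the seqcount lets lockless
 * readers detect that the tree changed underneath them and retry.
 */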
static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			     const struct nft_set_elem *elem,
			     struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;
	int err;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	err = __nft_rbtree_insert(net, set, rbe, ext);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	return err;
}

static void nft_rbtree_remove(const struct net *net,
			      const struct nft_set *set,
			      const struct nft_set_elem *elem)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	rb_erase(&rbe->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);
}
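
/* Flip the element's generation bit back to active and clear the busy
 * bit taken by nft_rbtree_flush().
 */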
static void nft_rbtree_activate(const struct net *net,
				const struct nft_set *set,
				const struct nft_set_elem *elem)
{
	struct nft_rbtree_elem *rbe = elem->priv;

	nft_set_elem_change_active(net, set, &rbe->ext);
	nft_set_elem_clear_busy(&rbe->ext);
}
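
/* Deactivate an element on set flush. The busy bit ensures that two
 * concurrent transactions do not claim the same element twice.
 */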
static bool nft_rbtree_flush(const struct net *net,
			     const struct nft_set *set, void *priv)
{
	struct nft_rbtree_elem *rbe = priv;

	if (!nft_set_elem_mark_busy(&rbe->ext) ||
	    !nft_is_active(net, &rbe->ext)) {
		nft_set_elem_change_active(net, set, &rbe->ext);
		return true;
	}
	return false;
}
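
/* Look up the element matching @elem, taking the interval endpoint type
 * into account, and mark it inactive in the next generation so commit
 * can remove it (or abort can restore it).
 */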
static void *nft_rbtree_deactivate(const struct net *net,
				   const struct nft_set *set,
				   const struct nft_set_elem *elem)
{
	const struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent = priv->root.rb_node;
	struct nft_rbtree_elem *rbe, *this = elem->priv;
	u8 genmask = nft_genmask_next(net);
	int d;

	while (parent != NULL) {
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
			   set->klen);
		if (d < 0) {
			parent = parent->rb_left;
		} else if (d > 0) {
			parent = parent->rb_right;
		} else {
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(this)) {
				parent = parent->rb_left;
				continue;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(this)) {
				parent = parent->rb_right;
				continue;
			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = parent->rb_left;
				continue;
			}
			nft_rbtree_flush(net, set, rbe);
			return rbe;
		}
	}
	return NULL;
}
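
/* Walk all active, non-expired elements under the read lock, calling
 * iter->fn() for each one; honours iter->skip for resumed dumps.
 */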
static void nft_rbtree_walk(const struct nft_ctx *ctx,
			    struct nft_set *set,
			    struct nft_set_iter *iter)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct nft_set_elem elem;
	struct rb_node *node;

	read_lock_bh(&priv->lock);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (iter->count < iter->skip)
			goto cont;
		if (nft_set_elem_expired(&rbe->ext))
			goto cont;
		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
			goto cont;

		elem.priv = rbe;

		iter->err = iter->fn(ctx, set, iter, &elem);
		if (iter->err < 0) {
			read_unlock_bh(&priv->lock);
			return;
		}
cont:
		iter->count++;
	}
	read_unlock_bh(&priv->lock);
}
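
/* Periodic garbage collection, run from a deferrable work item. Expired
 * intervals are reaped under the write lock: the end element is queued
 * together with its expired start element, and busy elements are left
 * for the next run. The catchall element, if any, is collected last.
 */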
static void nft_rbtree_gc(struct work_struct *work)
{
	struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
	struct nft_set_gc_batch *gcb = NULL;
	struct nft_rbtree *priv;
	struct rb_node *node;
	struct nft_set *set;
	struct net *net;
	u8 genmask;

	priv = container_of(work, struct nft_rbtree, gc_work.work);
	set = nft_set_container_of(priv);
	net = read_pnet(&set->net);
	genmask = nft_genmask_cur(net);

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (!nft_set_elem_active(&rbe->ext, genmask))
			continue;

		/* elements are reversed in the rbtree for historical reasons,
		 * from highest to lowest value, that is why the end element is
		 * always visited before the start element.
		 */
		if (nft_rbtree_interval_end(rbe)) {
			rbe_end = rbe;
			continue;
		}
		if (!nft_set_elem_expired(&rbe->ext))
			continue;

		if (nft_set_elem_mark_busy(&rbe->ext)) {
			rbe_end = NULL;
			continue;
		}

		if (rbe_prev) {
			rb_erase(&rbe_prev->node, &priv->root);
			rbe_prev = NULL;
		}
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (!gcb)
			break;

		atomic_dec(&set->nelems);
		nft_set_gc_batch_add(gcb, rbe);
		rbe_prev = rbe;

		if (rbe_end) {
			atomic_dec(&set->nelems);
			nft_set_gc_batch_add(gcb, rbe_end);
			rb_erase(&rbe_end->node, &priv->root);
			rbe_end = NULL;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
	if (rbe_prev)
		rb_erase(&rbe_prev->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	rbe = nft_set_catchall_gc(set);
	if (rbe) {
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (gcb)
			nft_set_gc_batch_add(gcb, rbe);
	}
	nft_set_gc_batch_complete(gcb);

	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
			   nft_set_gc_interval(set));
}

static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
			       const struct nft_set_desc *desc)
{
	return sizeof(struct nft_rbtree);
}
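
/* Set instantiation: associate the seqcount with the rwlock (needed for
 * lockdep with seqcount_rwlock_t) and start the GC work item only for
 * sets with element timeouts.
 */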
static int nft_rbtree_init(const struct nft_set *set,
			   const struct nft_set_desc *desc,
			   const struct nlattr * const nla[])
{
	struct nft_rbtree *priv = nft_set_priv(set);

	rwlock_init(&priv->lock);
	seqcount_rwlock_init(&priv->count, &priv->lock);
	priv->root = RB_ROOT;

	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
	if (set->flags & NFT_SET_TIMEOUT)
		queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
				   nft_set_gc_interval(set));

	return 0;
}

static void nft_rbtree_destroy(const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	cancel_delayed_work_sync(&priv->gc_work);
	rcu_barrier();
	while ((node = priv->root.rb_node) != NULL) {
		rb_erase(node, &priv->root);
		rbe = rb_entry(node, struct nft_rbtree_elem, node);
		nft_set_elem_destroy(set, rbe, true);
	}
}

static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
				struct nft_set_estimate *est)
{
	if (desc->field_count > 1)
		return false;

	if (desc->size)
		est->size = sizeof(struct nft_rbtree) +
			    desc->size * sizeof(struct nft_rbtree_elem);
	else
		est->size = ~0;

	est->lookup = NFT_SET_CLASS_O_LOG_N;
	est->space  = NFT_SET_CLASS_O_N;

	return true;
}

const struct nft_set_type nft_set_rbtree_type = {
	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
	.ops		= {
		.privsize	= nft_rbtree_privsize,
		.elemsize	= offsetof(struct nft_rbtree_elem, ext),
		.estimate	= nft_rbtree_estimate,
		.init		= nft_rbtree_init,
		.destroy	= nft_rbtree_destroy,
		.insert		= nft_rbtree_insert,
		.remove		= nft_rbtree_remove,
		.deactivate	= nft_rbtree_deactivate,
		.flush		= nft_rbtree_flush,
		.activate	= nft_rbtree_activate,
		.lookup		= nft_rbtree_lookup,
		.walk		= nft_rbtree_walk,
		.get		= nft_rbtree_get,
	},
};