/*
 * count the number of connections matching an arbitrary key.
 *
 * (C) 2017 Red Hat GmbH
 *
 * split from xt_connlimit.c:
 *   only ignore TIME_WAIT or gone connections
 *   (C) CC Computer Consultants GmbH, 2007
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>
#define CONNCOUNT_SLOTS		256U

#ifdef CONFIG_LOCKDEP
#define CONNCOUNT_LOCK_SLOTS	8U
#else
#define CONNCOUNT_LOCK_SLOTS	256U
#endif

#define CONNCOUNT_GC_MAX_NODES	8
#define MAX_KEYLEN		5
/* we will save the tuples of all connections we care about */
struct nf_conncount_tuple {
	struct list_head		node;
	struct nf_conntrack_tuple	tuple;
	struct nf_conntrack_zone	zone;
	int				cpu;
	u32				jiffies32;
	struct rcu_head			rcu_head;
};

struct nf_conncount_rb {
	struct rb_node node;
	struct nf_conncount_list list;
	u32 key[MAX_KEYLEN];
	struct rcu_head rcu_head;
};
static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;
struct nf_conncount_data {
	unsigned int keylen;
	struct rb_root root[CONNCOUNT_SLOTS];
	struct net *net;
	struct work_struct gc_work;
	unsigned long pending_trees[BITS_TO_LONGS(CONNCOUNT_SLOTS)];
	unsigned int gc_tree;
};
static u_int32_t conncount_rnd __read_mostly;
static struct kmem_cache *conncount_rb_cachep __read_mostly;
static struct kmem_cache *conncount_conn_cachep __read_mostly;
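/* TCP connections that have reached TIME_WAIT or CLOSE no longer count
 * toward a limit; everything else, including non-TCP, is treated as live.
 */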
static inline bool already_closed(const struct nf_conn *conn)
{
	if (nf_ct_protonum(conn) == IPPROTO_TCP)
		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
	else
		return false;
}
static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
{
	return memcmp(a, b, klen * sizeof(u32));
}
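/* Append @tuple to the per-key list.  Returns NF_CONNCOUNT_ADDED on
 * success, NF_CONNCOUNT_ERR on allocation failure or list overflow, and
 * NF_CONNCOUNT_SKIP when the list was already marked dead by the garbage
 * collector, in which case the caller must insert a fresh tree node.
 */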
enum nf_conncount_list_add
nf_conncount_add(struct nf_conncount_list *list,
		 const struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_zone *zone)
{
	struct nf_conncount_tuple *conn;

	if (WARN_ON_ONCE(list->count > INT_MAX))
		return NF_CONNCOUNT_ERR;

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL)
		return NF_CONNCOUNT_ERR;

	conn->tuple = *tuple;
	conn->zone = *zone;
	conn->cpu = raw_smp_processor_id();
	conn->jiffies32 = (u32)jiffies;
	spin_lock(&list->list_lock);
	if (list->dead == true) {
		kmem_cache_free(conncount_conn_cachep, conn);
		spin_unlock(&list->list_lock);
		return NF_CONNCOUNT_SKIP;
	}
	list_add_tail(&conn->node, &list->head);
	list->count++;
	spin_unlock(&list->list_lock);
	return NF_CONNCOUNT_ADDED;
}
EXPORT_SYMBOL_GPL(nf_conncount_add);
static void __conn_free(struct rcu_head *h)
{
	struct nf_conncount_tuple *conn;

	conn = container_of(h, struct nf_conncount_tuple, rcu_head);
	kmem_cache_free(conncount_conn_cachep, conn);
}
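/* Unlink @conn from its list under the list lock and free it after an RCU
 * grace period.  Returns true when this removal emptied the list, so the
 * caller can consider reclaiming the containing tree node.
 */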
static bool conn_free(struct nf_conncount_list *list,
		      struct nf_conncount_tuple *conn)
{
	bool free_entry = false;

	spin_lock(&list->list_lock);

	if (list->count == 0) {
		spin_unlock(&list->list_lock);
		return free_entry;
	}

	list->count--;
	list_del_rcu(&conn->node);
	if (list->count == 0)
		free_entry = true;

	spin_unlock(&list->list_lock);
	call_rcu(&conn->rcu_head, __conn_free);
	return free_entry;
}
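/* Look up the conntrack entry for @conn.  Returns the tuple hash when the
 * connection is still known to conntrack, ERR_PTR(-ENOENT) after evicting
 * a stale entry (with *free_entry telling the caller whether the list
 * became empty), or ERR_PTR(-EAGAIN) when the entry might still be an
 * unconfirmed insertion by another cpu.
 */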
static const struct nf_conntrack_tuple_hash *
find_or_evict(struct net *net, struct nf_conncount_list *list,
	      struct nf_conncount_tuple *conn, bool *free_entry)
{
	const struct nf_conntrack_tuple_hash *found;
	unsigned long a, b;
	int cpu = raw_smp_processor_id();
	__s32 age;

	found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
	if (found)
		return found;
	b = conn->jiffies32;
	a = (u32)jiffies;

	/* conn might have been added just before by another cpu and
	 * might still be unconfirmed. In this case, nf_conntrack_find()
	 * returns no result. Thus only evict if this cpu added the
	 * stale entry or if the entry is older than two jiffies.
	 */
	age = a - b;
	if (conn->cpu == cpu || age >= 2) {
		*free_entry = conn_free(list, conn);
		return ERR_PTR(-ENOENT);
	}

	return ERR_PTR(-EAGAIN);
}
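/* Walk the saved connections for one key: drop entries whose conntrack is
 * gone or already closed, and clear *addit when @tuple is already present
 * so the caller does not add it a second time.
 */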
void nf_conncount_lookup(struct net *net,
			 struct nf_conncount_list *list,
			 const struct nf_conntrack_tuple *tuple,
			 const struct nf_conntrack_zone *zone,
			 bool *addit)
{
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn, *conn_n;
	struct nf_conn *found_ct;
	unsigned int collect = 0;
	bool free_entry = false;

	/* best effort only */
	*addit = tuple ? true : false;

	/* check the saved connections */
	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
		if (collect > CONNCOUNT_GC_MAX_NODES)
			break;

		found = find_or_evict(net, list, conn, &free_entry);
		if (IS_ERR(found)) {
			/* Not found, but might be about to be confirmed */
			if (PTR_ERR(found) == -EAGAIN) {
				if (!tuple)
					continue;

				if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
				    nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
				    nf_ct_zone_id(zone, zone->dir))
					*addit = false;
			} else if (PTR_ERR(found) == -ENOENT)
				collect++;
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);

		if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) &&
		    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
			/*
			 * We should not see tuples twice unless someone hooks
			 * this into a table without "-p tcp --syn".
			 *
			 * Attempt to avoid a re-add in this case.
			 */
			*addit = false;
		} else if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			conn_free(list, conn);
			collect++;
			continue;
		}

		nf_ct_put(found_ct);
	}
}
EXPORT_SYMBOL_GPL(nf_conncount_lookup);
void nf_conncount_list_init(struct nf_conncount_list *list)
{
	spin_lock_init(&list->list_lock);
	INIT_LIST_HEAD(&list->head);
	list->count = 0;
	list->dead = false;
}
EXPORT_SYMBOL_GPL(nf_conncount_list_init);
/* Return true if the list is empty */
bool nf_conncount_gc_list(struct net *net,
			  struct nf_conncount_list *list)
{
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn, *conn_n;
	struct nf_conn *found_ct;
	unsigned int collected = 0;
	bool free_entry = false;

	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
		found = find_or_evict(net, list, conn, &free_entry);
		if (IS_ERR(found)) {
			if (PTR_ERR(found) == -ENOENT) {
				if (free_entry)
					return true;
				collected++;
			}
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);
		if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			if (conn_free(list, conn))
				return true;
			collected++;
			continue;
		}

		nf_ct_put(found_ct);
		if (collected > CONNCOUNT_GC_MAX_NODES)
			return false;
	}
	return false;
}
EXPORT_SYMBOL_GPL(nf_conncount_gc_list);
static void __tree_nodes_free(struct rcu_head *h)
{
	struct nf_conncount_rb *rbconn;

	rbconn = container_of(h, struct nf_conncount_rb, rcu_head);
	kmem_cache_free(conncount_rb_cachep, rbconn);
}
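/* Erase the collected nodes from the tree, but only those that are still
 * empty; mark each erased list dead so a concurrent nf_conncount_add()
 * backs off, then free the node after an RCU grace period.
 */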
static void tree_nodes_free(struct rb_root *root,
			    struct nf_conncount_rb *gc_nodes[],
			    unsigned int gc_count)
{
	struct nf_conncount_rb *rbconn;

	while (gc_count) {
		rbconn = gc_nodes[--gc_count];
		spin_lock(&rbconn->list.list_lock);
		if (rbconn->list.count == 0 && rbconn->list.dead == false) {
			rbconn->list.dead = true;
			rb_erase(&rbconn->node, root);
			call_rcu(&rbconn->rcu_head, __tree_nodes_free);
		}
		spin_unlock(&rbconn->list.list_lock);
	}
}
static void schedule_gc_worker(struct nf_conncount_data *data, int tree)
{
	set_bit(tree, data->pending_trees);
	schedule_work(&data->gc_work);
}
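/* Slow path: take the per-bucket spinlock, re-walk the tree (another cpu
 * may have inserted the key meanwhile), garbage-collect up to
 * CONNCOUNT_GC_MAX_NODES empty nodes on the way down, and finally insert
 * a new node holding @tuple.  Returns the resulting connection count, or
 * 0 on allocation failure (hotdrop).
 */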
static unsigned int
insert_tree(struct net *net,
	    struct nf_conncount_data *data,
	    struct rb_root *root,
	    unsigned int hash,
	    const u32 *key,
	    u8 keylen,
	    const struct nf_conntrack_tuple *tuple,
	    const struct nf_conntrack_zone *zone)
{
	enum nf_conncount_list_add ret;
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
	struct rb_node **rbnode, *parent;
	struct nf_conncount_rb *rbconn;
	struct nf_conncount_tuple *conn;
	unsigned int count = 0, gc_count = 0;
	bool node_found = false;

	spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);

	parent = NULL;
	rbnode = &(root->rb_node);
	while (*rbnode) {
		int diff;

		rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);

		parent = *rbnode;
		diff = key_diff(key, rbconn->key, keylen);
		if (diff < 0) {
			rbnode = &((*rbnode)->rb_left);
		} else if (diff > 0) {
			rbnode = &((*rbnode)->rb_right);
		} else {
			/* unlikely: other cpu added node already */
			node_found = true;
			ret = nf_conncount_add(&rbconn->list, tuple, zone);
			if (ret == NF_CONNCOUNT_ERR) {
				count = 0; /* hotdrop */
			} else if (ret == NF_CONNCOUNT_ADDED) {
				count = rbconn->list.count;
			} else {
				/* NF_CONNCOUNT_SKIP, rbconn is already
				 * reclaimed by gc, insert a new tree node
				 */
				node_found = false;
			}
			break;
		}

		if (gc_count >= ARRAY_SIZE(gc_nodes))
			continue;

		if (nf_conncount_gc_list(net, &rbconn->list))
			gc_nodes[gc_count++] = rbconn;
	}

	if (gc_count) {
		tree_nodes_free(root, gc_nodes, gc_count);
		/* tree_nodes_free before new allocation permits
		 * allocator to re-use newly free'd object.
		 *
		 * This is a rare event; in most cases we will find
		 * existing node to re-use. (or gc_count is 0).
		 */

		if (gc_count >= ARRAY_SIZE(gc_nodes))
			schedule_gc_worker(data, hash);
	}

	if (node_found)
		goto out_unlock;

	/* expected case: no match, insert new node */
	rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
	if (rbconn == NULL)
		goto out_unlock;

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL) {
		kmem_cache_free(conncount_rb_cachep, rbconn);
		goto out_unlock;
	}

	conn->tuple = *tuple;
	conn->zone = *zone;
	memcpy(rbconn->key, key, sizeof(u32) * keylen);

	nf_conncount_list_init(&rbconn->list);
	list_add(&conn->node, &rbconn->list.head);
	count = 1;
	rbconn->list.count = count;

	rb_link_node(&rbconn->node, parent, rbnode);
	rb_insert_color(&rbconn->node, root);
out_unlock:
	spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
	return count;
}
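/* Fast path: walk the tree under RCU without taking the bucket lock.  On
 * a key match, count (and possibly add) within the node's list; fall back
 * to insert_tree() only when the key is absent or the node was reclaimed.
 */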
static unsigned int
count_tree(struct net *net,
	   struct nf_conncount_data *data,
	   const u32 *key,
	   const struct nf_conntrack_tuple *tuple,
	   const struct nf_conntrack_zone *zone)
{
	enum nf_conncount_list_add ret;
	struct rb_root *root;
	struct rb_node *parent;
	struct nf_conncount_rb *rbconn;
	unsigned int hash;
	u8 keylen = data->keylen;

	hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
	root = &data->root[hash];

	parent = rcu_dereference_raw(root->rb_node);
	while (parent) {
		int diff;
		bool addit;

		rbconn = rb_entry(parent, struct nf_conncount_rb, node);

		diff = key_diff(key, rbconn->key, keylen);
		if (diff < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
		} else if (diff > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
		} else {
			/* same source network -> be counted! */
			nf_conncount_lookup(net, &rbconn->list, tuple, zone,
					    &addit);

			if (!addit)
				return rbconn->list.count;

			ret = nf_conncount_add(&rbconn->list, tuple, zone);
			if (ret == NF_CONNCOUNT_ERR) {
				return 0; /* hotdrop */
			} else if (ret == NF_CONNCOUNT_ADDED) {
				return rbconn->list.count;
			} else {
				/* NF_CONNCOUNT_SKIP, rbconn is already
				 * reclaimed by gc, insert a new tree node
				 */
				break;
			}
		}
	}

	if (!tuple)
		return 0;

	return insert_tree(net, data, root, hash, key, keylen, tuple, zone);
}
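/* Work queue handler: scan one tree whose pending bit is set, collect
 * empty lists under RCU, then erase the corresponding nodes under the
 * bucket lock.  Re-arms itself while other trees still have a pending
 * bit set.
 */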
static void tree_gc_worker(struct work_struct *work)
{
	struct nf_conncount_data *data = container_of(work, struct nf_conncount_data, gc_work);
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES], *rbconn;
	struct rb_root *root;
	struct rb_node *node;
	unsigned int tree, next_tree, gc_count = 0;

	tree = data->gc_tree % CONNCOUNT_LOCK_SLOTS;
	root = &data->root[tree];

	rcu_read_lock();
	for (node = rb_first(root); node != NULL; node = rb_next(node)) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);
		if (nf_conncount_gc_list(data->net, &rbconn->list))
			gc_nodes[gc_count++] = rbconn;
		/* gc_nodes is full; leave the rest for a later pass */
		if (gc_count >= ARRAY_SIZE(gc_nodes))
			break;
	}
	rcu_read_unlock();

	spin_lock_bh(&nf_conncount_locks[tree]);

	if (gc_count)
		tree_nodes_free(root, gc_nodes, gc_count);

	clear_bit(tree, data->pending_trees);

	next_tree = (tree + 1) % CONNCOUNT_SLOTS;
	/* find_next_bit() takes the bitmap size before the start offset */
	next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);

	if (next_tree < CONNCOUNT_SLOTS) {
		data->gc_tree = next_tree;
		schedule_work(work);
	}

	spin_unlock_bh(&nf_conncount_locks[tree]);
}
/* Count and return number of conntrack entries in 'net' with particular 'key'.
 * If 'tuple' is not null, insert it into the accounting data structure.
 * Call with RCU read lock.
 */
unsigned int nf_conncount_count(struct net *net,
				struct nf_conncount_data *data,
				const u32 *key,
				const struct nf_conntrack_tuple *tuple,
				const struct nf_conntrack_zone *zone)
{
	return count_tree(net, data, key, tuple, zone);
}
EXPORT_SYMBOL_GPL(nf_conncount_count);
struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
					    unsigned int keylen)
{
	struct nf_conncount_data *data;
	int ret, i;

	if (keylen % sizeof(u32) ||
	    keylen / sizeof(u32) > MAX_KEYLEN ||
	    keylen == 0)
		return ERR_PTR(-EINVAL);

	net_get_random_once(&conncount_rnd, sizeof(conncount_rnd));

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	ret = nf_ct_netns_get(net, family);
	if (ret < 0) {
		kfree(data);
		return ERR_PTR(ret);
	}

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		data->root[i] = RB_ROOT;

	data->keylen = keylen / sizeof(u32);
	data->net = net;
	INIT_WORK(&data->gc_work, tree_gc_worker);

	return data;
}
EXPORT_SYMBOL_GPL(nf_conncount_init);
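/* Free all saved tuples on @list without RCU deferral; only safe once the
 * list can no longer be reached, e.g. during nf_conncount_destroy().
 */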
void nf_conncount_cache_free(struct nf_conncount_list *list)
{
	struct nf_conncount_tuple *conn, *conn_n;

	list_for_each_entry_safe(conn, conn_n, &list->head, node)
		kmem_cache_free(conncount_conn_cachep, conn);
}
EXPORT_SYMBOL_GPL(nf_conncount_cache_free);
static void destroy_tree(struct rb_root *r)
{
	struct nf_conncount_rb *rbconn;
	struct rb_node *node;

	while ((node = rb_first(r)) != NULL) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);

		rb_erase(node, r);

		nf_conncount_cache_free(&rbconn->list);

		kmem_cache_free(conncount_rb_cachep, rbconn);
	}
}
void nf_conncount_destroy(struct net *net, unsigned int family,
			  struct nf_conncount_data *data)
{
	unsigned int i;

	cancel_work_sync(&data->gc_work);
	nf_ct_netns_put(net, family);

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		destroy_tree(&data->root[i]);

	kfree(data);
}
EXPORT_SYMBOL_GPL(nf_conncount_destroy);
static int __init nf_conncount_modinit(void)
{
	int i;

	BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS);
	BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0);

	for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i)
		spin_lock_init(&nf_conncount_locks[i]);

	conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
					   sizeof(struct nf_conncount_tuple),
					   0, 0, NULL);
	if (!conncount_conn_cachep)
		return -ENOMEM;

	conncount_rb_cachep = kmem_cache_create("nf_conncount_rb",
					   sizeof(struct nf_conncount_rb),
					   0, 0, NULL);
	if (!conncount_rb_cachep) {
		kmem_cache_destroy(conncount_conn_cachep);
		return -ENOMEM;
	}

	return 0;
}
static void __exit nf_conncount_modexit(void)
{
	kmem_cache_destroy(conncount_conn_cachep);
	kmem_cache_destroy(conncount_rb_cachep);
}
module_init(nf_conncount_modinit);
module_exit(nf_conncount_modexit);
MODULE_DESCRIPTION("netfilter: count number of connections matching a key");
MODULE_LICENSE("GPL");