/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
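
/* Expectations live in a global per-netns hash table (net->ct.expect_hash,
 * keyed by the destination part of the expected tuple) and on a per-master
 * list hanging off the master's nf_conn_help (exp->lnode).  Expectations
 * are refcounted; the final nf_ct_expect_put() frees through RCU, so
 * lockless readers under rcu_read_lock() remain safe. */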

/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 portid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	NF_CT_ASSERT(master_help);
	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	hlist_del(&exp->lnode);
	master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
	nf_ct_expect_put(exp);

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);
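
/* Timer callback: the expectation expired before any packet matched it.
 * Unlink it under nf_conntrack_lock and drop the reference the timer held.
 * Runs in timer (softirq) context, hence the _bh locking. */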
static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	spin_lock_bh(&nf_conntrack_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_put(exp);
}
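
/* Hash an expected tuple into the expectation table.  Only the destination
 * part matters: jhash2 runs over the destination address, seeded with the
 * protocol numbers, the destination port and a boot-time random value.
 * The 32-bit result is mapped onto the table with a multiply-shift
 * ((u64)hash * hsize >> 32) rather than a modulo, avoiding a division. */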
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	if (unlikely(!nf_conntrack_hash_rnd)) {
		init_nf_conntrack_hash_rnd();
	}

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
	return ((u64)hash * nf_ct_expect_hsize) >> 32;
}
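
/* Lockless lookup.  The caller must hold rcu_read_lock() and must not use
 * the result after leaving the read-side critical section: no reference is
 * taken.  nf_ct_expect_find_get() below is the refcounted variant. */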
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone)
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	if (i && !atomic_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it is deleted from the
 * global list and returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If master is not in hash table yet (ie. packet hasn't left
	   this machine yet), how can other end know about expected?
	   Hence these are not the droids you are looking for (if
	   master ct never got confirmed, we'd hold a reference to it
	   and weird things would happen to future packets). */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		atomic_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		return exp;
	}

	return NULL;
}

/* delete all expectations for this conntrack */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *next;

	/* Optimization: most connections never expect any others. */
	if (!help)
		return;

	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
			       const struct nf_conntrack_expect *b)
{
	/* Part covered by intersection of masks must be unequal,
	   otherwise they clash */
	struct nf_conntrack_tuple_mask intersect_mask;
	int count;

	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.src.u3.all[count] =
			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
	}

	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}
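
/* Illustration: if expectation A has an empty source mask (src=any) and
 * expectation B expects one specific source, the intersection of the two
 * masks is empty, so the masked comparison above reduces to comparing the
 * destination parts; with equal destinations the two clash, because a
 * single incoming packet could satisfy both. */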

static inline int expect_matches(const struct nf_conntrack_expect *a,
				 const struct nf_conntrack_expect *b)
{
	return a->master == b->master && a->class == b->class &&
	       nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
	       nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
	       nf_ct_zone(a->master) == nf_ct_zone(b->master);
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_lock);
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * expectations. During conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
	if (!new)
		return NULL;

	new->master = me;
	atomic_set(&new->use, 1);
	return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
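
/* Typical helper usage is a short alloc/init/related/put sequence.  A
 * sketch only (error handling trimmed; "port" stands for a __be16
 * data-channel port the helper parsed out of the payload, and real
 * helpers such as nf_conntrack_ftp may pass different addresses):
 *
 *	struct nf_conntrack_expect *exp;
 *
 *	exp = nf_ct_expect_alloc(ct);
 *	if (exp == NULL)
 *		return NF_DROP;
 *	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
 *			  NULL, &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3,
 *			  IPPROTO_TCP, NULL, &port);
 *	if (nf_ct_expect_related(exp) != 0)
 *		ret = NF_DROP;
 *	nf_ct_expect_put(exp);
 */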

void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;

#ifdef CONFIG_NF_NAT_NEEDED
	memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
	memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
	struct nf_conntrack_expect *exp;

	exp = container_of(head, struct nf_conntrack_expect, rcu);
	kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (atomic_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);
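
/* The writers below all run with nf_conntrack_lock held; the
 * rcu_dereference_protected() calls assert exactly that via lockdep. */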

static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(exp);
	unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

	/* two references : one for hash insert, one for the timer */
	atomic_add(2, &exp->use);

	hlist_add_head(&exp->lnode, &master_help->expectations);
	master_help->expecting[exp->class]++;

	hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
	net->ct.expect_count++;

	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
		    (unsigned long)exp);
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_lock));
	if (helper) {
		exp->timeout.expires = jiffies +
			helper->expect_policy[exp->class].timeout * HZ;
	}
	add_timer(&exp->timeout);

	NF_CT_STAT_INC(net, expect_create);
	return 0;
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
				struct nf_conntrack_expect *new)
{
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_expect *exp, *last = NULL;

	hlist_for_each_entry(exp, &master_help->expectations, lnode) {
		if (exp->class == new->class)
			last = exp;
	}

	if (last && del_timer(&last->timeout)) {
		nf_ct_unlink_expect(last);
		nf_ct_expect_put(last);
	}
}

static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *next;
	unsigned int h;
	int ret = 1;

	if (!master_help) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(&expect->tuple);
	hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			if (del_timer(&i->timeout)) {
				nf_ct_unlink_expect(i);
				nf_ct_expect_put(i);
				break;
			}
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will be over limit? */
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_lock));
	if (helper) {
		p = &helper->expect_policy[expect->class];
		if (p->max_expected &&
		    master_help->expecting[expect->class] >= p->max_expected) {
			evict_oldest_expect(master, expect);
			if (master_help->expecting[expect->class]
			    >= p->max_expected) {
				ret = -EMFILE;
				goto out;
			}
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		net_warn_ratelimited("nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}

int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 portid, int report)
{
	int ret;

	spin_lock_bh(&nf_conntrack_lock);
	ret = __nf_ct_expect_check(expect);
	if (ret <= 0)
		goto out;

	ret = nf_ct_expect_insert(expect);
	if (ret < 0)
		goto out;
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
	return ret;
out:
	spin_unlock_bh(&nf_conntrack_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);
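
/* Expectations are exported read-only to userspace via
 * /proc/net/nf_conntrack_expect.  Each line shows, roughly: the remaining
 * timeout in seconds, the l3/l4 protocol numbers, the expected tuple, any
 * flags (PERMANENT, INACTIVE, USERSPACE) and the name of the helper that
 * planted the expectation. */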

#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
		if (n)
			return n;
	}
	return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(hlist_next_rcu(head));
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
	}
	return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head = ct_expect_get_first(seq);

	if (head)
		while (pos && (head = ct_expect_get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_printf(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
					 expect->tuple.dst.protonum));

	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_printf(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE) {
		seq_printf(s, "%sINACTIVE", delim);
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_USERSPACE)
		seq_printf(s, "%sUSERSPACE", delim);

	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name)
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

	return seq_putc(s, '\n');
}

static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
	.owner   = THIS_MODULE,
	.open    = exp_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	struct proc_dir_entry *proc;

	proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
			   &exp_file_ops);
	if (!proc)
		return -ENOMEM;
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
	return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_pernet_init(struct net *net)
{
	int err = -ENOMEM;

	net->ct.expect_count = 0;
	net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
	if (net->ct.expect_hash == NULL)
		goto err1;

	err = exp_proc_init(net);
	if (err < 0)
		goto err2;

	return 0;
err2:
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
	return err;
}

void nf_conntrack_expect_pernet_fini(struct net *net)
{
	exp_proc_remove(net);
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}
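
/* Sizing: unless overridden with the expect_hashsize module parameter, the
 * expectation table gets nf_conntrack_htable_size / 256 buckets (at least
 * one), and nf_ct_expect_max allows at most four expectations per bucket
 * on average. */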
int nf_conntrack_expect_init(void)
{
	if (!nf_ct_expect_hsize) {
		nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
		if (!nf_ct_expect_hsize)
			nf_ct_expect_hsize = 1;
	}
	nf_ct_expect_max = nf_ct_expect_hsize * 4;
	nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
					sizeof(struct nf_conntrack_expect),
					0, 0, NULL);
	if (!nf_ct_expect_cachep)
		return -ENOMEM;
	return 0;
}

void nf_conntrack_expect_fini(void)
{
	rcu_barrier(); /* Wait for call_rcu() before destroy */
	kmem_cache_destroy(nf_ct_expect_cachep);
}