/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <[email protected]>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

static unsigned int nf_ct_expect_hash_rnd __read_mostly;
unsigned int nf_ct_expect_max __read_mostly;
static int nf_ct_expect_hash_rnd_initted __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;

/* nf_conntrack_expect helper functions */
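/* Unlink an expectation from the global hash and its master's helper list.
 * Callers hold nf_conntrack_lock; one of the references taken at insertion
 * time is dropped here. */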
void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);

        NF_CT_ASSERT(master_help);
        NF_CT_ASSERT(!timer_pending(&exp->timeout));

        hlist_del_rcu(&exp->hnode);
        net->ct.expect_count--;

        hlist_del(&exp->lnode);
        master_help->expecting[exp->class]--;
        nf_ct_expect_put(exp);

        NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect);

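/* Timer callback: the expectation was not fulfilled before its policy
 * timeout expired, so unlink it and drop the timer's reference. */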
static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
        struct nf_conntrack_expect *exp = (void *)ul_expect;

        spin_lock_bh(&nf_conntrack_lock);
        nf_ct_unlink_expect(exp);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_expect_put(exp);
}

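/* Hash an expectation by its destination: the destination address, the
 * (l3num, protonum) pair and the destination port, mixed with a lazily
 * initialized random seed, then scaled into [0, nf_ct_expect_hsize) via a
 * 64-bit multiply. */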
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash;

        if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
                get_random_bytes(&nf_ct_expect_hash_rnd,
                                 sizeof(nf_ct_expect_hash_rnd));
                nf_ct_expect_hash_rnd_initted = 1;
        }

        hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
                      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
                       (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hash_rnd);
        return ((u64)hash * nf_ct_expect_hsize) >> 32;
}

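/* Lockless lookup: the caller must hold rcu_read_lock() (or
 * nf_conntrack_lock) and no reference is taken on the returned entry. */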
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;
        struct hlist_node *n;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
                if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask))
                        return i;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        rcu_read_lock();
        i = __nf_ct_expect_find(net, tuple);
        if (i && !atomic_inc_not_zero(&i->use))
                i = NULL;
        rcu_read_unlock();

        return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it is deleted from the
 * global list and returned. */
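/* Called under nf_conntrack_lock.  On success the returned expectation
 * carries a reference that the caller releases with nf_ct_expect_put(). */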
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i, *exp = NULL;
        struct hlist_node *n;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(tuple);
        hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) {
                        exp = i;
                        break;
                }
        }
        if (!exp)
                return NULL;

        /* If the master is not in the hash table yet (ie. the packet hasn't
           left this machine yet), how can the other end know about the
           expected connection?  Hence these are not the droids you are
           looking for (if the master ct never got confirmed, we'd hold a
           reference to it and weird things would happen to future packets). */
        if (!nf_ct_is_confirmed(exp->master))
                return NULL;

        if (exp->flags & NF_CT_EXPECT_PERMANENT) {
                atomic_inc(&exp->use);
                return exp;
        } else if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                return exp;
        }

        return NULL;
}

/* Delete all expectations for this conntrack. */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
        struct hlist_node *n, *next;

        /* Optimization: most connections never expect any others. */
        if (!help)
                return;

        hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
                               const struct nf_conntrack_expect *b)
{
        /* Part covered by intersection of masks must be unequal,
           otherwise they clash */
        struct nf_conntrack_tuple_mask intersect_mask;
        int count;

        intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
                intersect_mask.src.u3.all[count] =
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }

        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}

static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
        return a->master == b->master && a->class == b->class
               && nf_ct_tuple_equal(&a->tuple, &b->tuple)
               && nf_ct_tuple_mask_equal(&a->mask, &b->mask);
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
        spin_lock_bh(&nf_conntrack_lock);
        if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                nf_ct_expect_put(exp);
        }
        spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for unfulfilled
 * expectations.  During conntrack destruction, the expectations are always
 * killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
        struct nf_conntrack_expect *new;

        new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
        if (!new)
                return NULL;

        new->master = me;
        atomic_set(&new->use, 1);
        INIT_RCU_HEAD(&new->rcu);
        return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

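/* Fill in the tuple and mask of an expectation.  A NULL saddr or src
 * wildcards that part of the tuple; daddr and dst must always be given. */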
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
                       u_int8_t family,
                       const union nf_inet_addr *saddr,
                       const union nf_inet_addr *daddr,
                       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
        int len;

        if (family == AF_INET)
                len = 4;
        else
                len = 16;

        exp->flags = 0;
        exp->class = class;
        exp->expectfn = NULL;
        exp->helper = NULL;
        exp->tuple.src.l3num = family;
        exp->tuple.dst.protonum = proto;

        if (saddr) {
                memcpy(&exp->tuple.src.u3, saddr, len);
                if (sizeof(exp->tuple.src.u3) > len)
                        /* address needs to be cleared for nf_ct_tuple_equal */
                        memset((void *)&exp->tuple.src.u3 + len, 0x00,
                               sizeof(exp->tuple.src.u3) - len);
                memset(&exp->mask.src.u3, 0xFF, len);
                if (sizeof(exp->mask.src.u3) > len)
                        memset((void *)&exp->mask.src.u3 + len, 0x00,
                               sizeof(exp->mask.src.u3) - len);
        } else {
                memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
                memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
        }

        if (src) {
                exp->tuple.src.u.all = *src;
                exp->mask.src.u.all = htons(0xFFFF);
        } else {
                exp->tuple.src.u.all = 0;
                exp->mask.src.u.all = 0;
        }

        memcpy(&exp->tuple.dst.u3, daddr, len);
        if (sizeof(exp->tuple.dst.u3) > len)
                /* address needs to be cleared for nf_ct_tuple_equal */
                memset((void *)&exp->tuple.dst.u3 + len, 0x00,
                       sizeof(exp->tuple.dst.u3) - len);

        exp->tuple.dst.u.all = *dst;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
        struct nf_conntrack_expect *exp;

        exp = container_of(head, struct nf_conntrack_expect, rcu);
        kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
        if (atomic_dec_and_test(&exp->use))
                call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

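/* Link the expectation into its master's helper list and the global hash
 * and start its per-class policy timer.  Runs under nf_conntrack_lock and
 * takes two references: one for the lists, one for the running timer. */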
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);
        const struct nf_conntrack_expect_policy *p;
        unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

        atomic_inc(&exp->use);

        hlist_add_head(&exp->lnode, &master_help->expectations);
        master_help->expecting[exp->class]++;

        hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
        net->ct.expect_count++;

        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
                    (unsigned long)exp);
        p = &master_help->helper->expect_policy[exp->class];
        exp->timeout.expires = jiffies + p->timeout * HZ;
        add_timer(&exp->timeout);

        atomic_inc(&exp->use);
        NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
                                struct nf_conntrack_expect *new)
{
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_expect *exp, *last = NULL;
        struct hlist_node *n;

        hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
                if (exp->class == new->class)
                        last = exp;
        }

        if (last && del_timer(&last->timeout)) {
                nf_ct_unlink_expect(last);
                nf_ct_expect_put(last);
        }
}

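/* Restart the policy timeout of an existing expectation.  Returns 0 if the
 * timer was already firing, i.e. the expectation is on its way out. */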
static inline int refresh_timer(struct nf_conntrack_expect *i)
{
        struct nf_conn_help *master_help = nfct_help(i->master);
        const struct nf_conntrack_expect_policy *p;

        if (!del_timer(&i->timeout))
                return 0;

        p = &master_help->helper->expect_policy[i->class];
        i->timeout.expires = jiffies + p->timeout * HZ;
        add_timer(&i->timeout);
        return 1;
}

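/* Validate a new expectation under nf_conntrack_lock: refuse it if the
 * master has no helper, refresh an identical existing entry, reject
 * clashes, and enforce both the per-class policy limit (evicting the
 * oldest expectation of that class if needed) and the global
 * nf_ct_expect_max limit. */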
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
        const struct nf_conntrack_expect_policy *p;
        struct nf_conntrack_expect *i;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        struct net *net = nf_ct_exp_net(expect);
        struct hlist_node *n;
        unsigned int h;
        int ret = 0;

        if (!master_help->helper) {
                ret = -ESHUTDOWN;
                goto out;
        }
        h = nf_ct_expect_dst_hash(&expect->tuple);
        hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
                        /* Refresh timer: if it's dying, ignore.. */
                        if (refresh_timer(i)) {
                                ret = 0;
                                goto out;
                        }
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        /* Will be over limit? */
        p = &master_help->helper->expect_policy[expect->class];
        if (p->max_expected &&
            master_help->expecting[expect->class] >= p->max_expected) {
                evict_oldest_expect(master, expect);
                if (master_help->expecting[expect->class] >= p->max_expected) {
                        ret = -EMFILE;
                        goto out;
                }
        }

        if (net->ct.expect_count >= nf_ct_expect_max) {
                if (net_ratelimit())
                        printk(KERN_WARNING
                               "nf_conntrack: expectation table full\n");
                ret = -EMFILE;
        }
out:
        return ret;
}

int nf_ct_expect_related(struct nf_conntrack_expect *expect)
{
        int ret;

        spin_lock_bh(&nf_conntrack_lock);
        ret = __nf_ct_expect_check(expect);
        if (ret < 0)
                goto out;

        nf_ct_expect_insert(expect);
        atomic_inc(&expect->use);
        spin_unlock_bh(&nf_conntrack_lock);
        nf_ct_expect_event(IPEXP_NEW, expect);
        nf_ct_expect_put(expect);
        return ret;
out:
        spin_unlock_bh(&nf_conntrack_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related);

int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                                u32 pid, int report)
{
        int ret;

        spin_lock_bh(&nf_conntrack_lock);
        ret = __nf_ct_expect_check(expect);
        if (ret < 0)
                goto out;
        nf_ct_expect_insert(expect);
out:
        spin_unlock_bh(&nf_conntrack_lock);
        if (ret == 0)
                nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

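/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build): how a conntrack helper typically uses the API above to expect a
 * related data connection.  The function name and the fixed TCP port are
 * hypothetical; real helpers derive them from the parsed control-channel
 * payload.
 */
#if 0
static int example_expect_data_channel(struct nf_conn *ct, __be16 port)
{
        struct nf_conntrack_expect *exp;
        int ret;

        exp = nf_ct_expect_alloc(ct);
        if (exp == NULL)
                return -ENOMEM;

        /* Expect a TCP connection from any source to the original
         * destination address on the given port. */
        nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
                          NULL,
                          &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3,
                          IPPROTO_TCP, NULL, &port);

        ret = nf_ct_expect_related(exp); /* takes its own references */
        nf_ct_expect_put(exp);           /* drop the allocation reference */
        return ret;
}
#endif
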
#ifdef CONFIG_PROC_FS
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct net *net = seq_file_net(seq);
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(head->next);
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

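/* One /proc/net/nf_conntrack_expect line per expectation: the seconds left
 * on its timer (or "-"), "l3proto = <l3num> proto=<protonum>", the tuple as
 * printed by the registered l3/l4 protocol modules, then the optional
 * PERMANENT and INACTIVE flags. */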
static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *expect;
        struct hlist_node *n = v;
        char *delim = "";

        expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (expect->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&expect->timeout)
                           ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
        else
                seq_printf(s, "- ");
        seq_printf(s, "l3proto = %u proto=%u ",
                   expect->tuple.src.l3num,
                   expect->tuple.dst.protonum);
        print_tuple(s, &expect->tuple,
                    __nf_ct_l3proto_find(expect->tuple.src.l3num),
                    __nf_ct_l4proto_find(expect->tuple.src.l3num,
                                         expect->tuple.dst.protonum));

        if (expect->flags & NF_CT_EXPECT_PERMANENT) {
                seq_printf(s, "PERMANENT");
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_INACTIVE)
                seq_printf(s, "%sINACTIVE", delim);

        return seq_putc(s, '\n');
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &exp_seq_ops,
                            sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
        .owner   = THIS_MODULE,
        .open    = exp_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};
#endif /* CONFIG_PROC_FS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry *proc;

        proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
        if (!proc)
                return -ENOMEM;
#endif /* CONFIG_PROC_FS */
        return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_PROC_FS
        proc_net_remove(net, "nf_conntrack_expect");
#endif /* CONFIG_PROC_FS */
}

module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600);

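/* Per-namespace setup.  Only init_net sizes the global parameters (the hash
 * defaults to nf_conntrack_htable_size / 256 buckets, the table limit to
 * four times that) and creates the slab cache; every namespace gets its own
 * expectation hash and proc entry. */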
int nf_conntrack_expect_init(struct net *net)
{
        int err = -ENOMEM;

        if (net_eq(net, &init_net)) {
                if (!nf_ct_expect_hsize) {
                        nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
                        if (!nf_ct_expect_hsize)
                                nf_ct_expect_hsize = 1;
                }
                nf_ct_expect_max = nf_ct_expect_hsize * 4;
        }

        net->ct.expect_count = 0;
        net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
                                                    &net->ct.expect_vmalloc);
        if (net->ct.expect_hash == NULL)
                goto err1;

        if (net_eq(net, &init_net)) {
                nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                                        sizeof(struct nf_conntrack_expect),
                                        0, 0, NULL);
                if (!nf_ct_expect_cachep)
                        goto err2;
        }

        err = exp_proc_init(net);
        if (err < 0)
                goto err3;

        return 0;

err3:
        if (net_eq(net, &init_net))
                kmem_cache_destroy(nf_ct_expect_cachep);
err2:
        nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
                             nf_ct_expect_hsize);
err1:
        return err;
}

void nf_conntrack_expect_fini(struct net *net)
{
        exp_proc_remove(net);
        if (net_eq(net, &init_net))
                kmem_cache_destroy(nf_ct_expect_cachep);
        nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
                             nf_ct_expect_hsize);
}