// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <[email protected]>
 *            Yossi Kuperman <[email protected]>
 *            Marcelo Ricardo Leitner <[email protected]>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

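/* Each conntrack zone used by an act_ct instance is backed by a single
 * nf_flowtable, shared by all actions in that zone and found through
 * zones_ht. Instances take a reference via tcf_ct_flow_table_get() and
 * drop it via tcf_ct_flow_table_put().
 */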
struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};

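/* Reserve the next free slot in the flow rule's pre-allocated entries
 * array; the helpers below fill one entry per NAT mangle or metadata
 * action.
 */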
static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}

/* The following NAT helper functions check if the inverted reverse tuple
 * (target) is different from the current dir tuple - meaning NAT for ports
 * and/or ip is needed, and add the relevant mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

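/* Fill a FLOW_ACTION_CT_METADATA entry so that hardware (or the driver)
 * can restore conntrack state - mark, labels and the ct/ctinfo cookie -
 * onto packets that hit the offloaded flow.
 */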
static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	enum ip_conntrack_info ctinfo;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = ct->mark;
#endif
	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
					     IP_CT_ESTABLISHED_REPLY;
	/* aligns with the CT reference on the SKB nf_ct_set */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}

static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

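/* ->action callback of flowtable_ct below: translate the flow's NAT and
 * conntrack metadata into flow_action entries of a flow rule, for one
 * direction of the connection.
 */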
static int tcf_ct_flow_table_fill_actions(struct net *net,
					  const struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}

static struct nf_flowtable_type flowtable_ct = {
	.action = tcf_ct_flow_table_fill_actions,
	.owner = THIS_MODULE,
};

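/* Get-or-create the flow table for params->zone. Lookup and insertion
 * are serialized by zones_mutex; an existing table is reused only if its
 * refcount can be raised, since it may already be on its way to teardown.
 */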
static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}

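/* Teardown runs from a workqueue after an RCU grace period, so that
 * concurrent readers of the zones table have finished with this entry.
 */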
static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct tcf_ct_flow_table *ct_ft;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft = params->ct_ft;

	if (refcount_dec_and_test(&params->ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}

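/* Offload an established connection into the zone's flow table.
 * IPS_OFFLOAD_BIT guards against adding the same conntrack entry twice;
 * TCP tracking is made liberal since the offloaded path does not update
 * window state.
 */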
static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp)
{
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

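/* Decide whether a connection is ready to be offloaded: only established
 * TCP or UDP flows, and only if no helper or sequence adjustment is
 * attached, since those need per-packet attention from conntrack.
 */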
static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false;

	if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
		return;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcp = true;
		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;
		break;
	case IPPROTO_UDP:
		break;
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp);
}

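/* Build a flow table lookup key straight from the skb headers. Returns
 * false on anything the flow table can't handle (fragments, IP options,
 * non-TCP/UDP, expiring TTL), letting the caller fall back to the full
 * conntrack path. *tcph is set for TCP so FIN/RST can be checked later.
 */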
static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return false;

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	iph = ip_hdr(skb);
	if (iph->protocol == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;

	return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return false;

	if (ip6h->hop_limit <= 1)
		return false;

	thoff = sizeof(*ip6h);
	if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	ip6h = ipv6_hdr(skb);
	if (ip6h->nexthdr == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;

	return true;
}

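/* Software fast path: look the packet up in the zone's flow table and,
 * on a hit, restore the conntrack reference onto the skb without calling
 * nf_conntrack_in(). FIN/RST tear the flow down so connection shutdown
 * goes through the regular conntrack path.
 */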
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	/* Previously seen or loopback */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return false;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
						    IP_CT_ESTABLISHED_REPLY;

	flow_offload_refresh(nf_ft, flow);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	nf_ct_acct_update(ct, dir, skb->len);

	return true;
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (nf_ct_zone(ct)->id != zone_id)
		return false;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

		return false;
	}

	return true;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;
	int err;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);

	return err;
}

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

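/* Conntrack needs whole packets, so reassemble fragments before the
 * lookup. The qdisc cb is saved and restored around defragmentation,
 * which reuses the skb cb area. -EINPROGRESS means the fragment was
 * queued and the skb is no longer ours.
 */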
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct qdisc_skb_cb cb;
	struct nf_conn *ct;
	int err = 0;
	bool frag;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	skb_get(skb);
	cb = *qdisc_skb_cb(skb);

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			goto out_free;

		if (!err) {
			*defrag = true;
			cb.mru = IPCB(skb)->frag_max_size;
		}
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;

		if (!err) {
			*defrag = true;
			cb.mru = IP6CB(skb)->frag_max_size;
		}
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	*qdisc_skb_cb(skb) = cb;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}

static void tcf_ct_params_free(struct rcu_head *head)
{
	struct tcf_ct_params *params = container_of(head,
						    struct tcf_ct_params, rcu);

	tcf_ct_flow_table_put(params);

	if (params->tmpl)
		nf_conntrack_put(&params->tmpl->ct_general);
	kfree(params);
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct nf_nat_range2 *range,
			  enum nf_nat_manip_type maniptype)
{
	__be16 proto = skb_protocol(skb, true);
	int hooknum, err = NF_ACCEPT;

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (proto == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto out;
		} else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto out;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		fallthrough;
	case IP_CT_NEW:
		/* Seen it before? This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto out;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto out;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
out:
	return err;
}
#endif /* CONFIG_NF_NAT */

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (ct->mark & ~(mask));
	if (ct->mark != new_mark) {
		ct->mark = new_mark;
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

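/* Pick the NAT direction for this packet. For connections that already
 * have NAT state, derive it from the conntrack status and direction;
 * otherwise take it from the configured ct action. When both SNAT and
 * DNAT apply, run the translation twice, once per manip type.
 */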
static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err;
	enum nf_nat_manip_type maniptype;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_DROP;   /* Can't NAT. */

	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
	    (ctinfo != IP_CT_RELATED || commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction. Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT;
	}

	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT &&
	    ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
		if (maniptype == NF_NAT_MANIP_SRC)
			maniptype = NF_NAT_MANIP_DST;
		else
			maniptype = NF_NAT_MANIP_SRC;

		err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	}
	return err;
#else
	return NF_ACCEPT;
#endif
}

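/* Action entry point. Order of operations: handle 'clear', defragment,
 * trim lower-layer padding, then either reuse a cached or flow-table
 * conntrack entry or run nf_conntrack_in(), apply NAT, and finally
 * commit (confirm) or consider the connection for flow table offload.
 */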
static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
		      struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	bool cached, commit, clear, force;
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool skip_add = false;
	bool defrag = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	force = p->ct_action & TCA_CT_ACT_FORCE;
	tmpl = p->tmpl;

	tcf_lastuse_update(&c->tcf_tm);

	if (clear) {
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_conntrack_put(&ct->ct_general);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header to linear area
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
	if (err == -EINPROGRESS) {
		retval = TC_ACT_STOLEN;
		goto out;
	}
	if (err)
		goto drop;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
	if (!cached) {
		if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			ct = nf_ct_get(skb, &ctinfo);
			if (skb_nfct(skb))
				nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		nf_conntrack_confirm(skb);
	} else if (!skip_add) {
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
	}

out_push:
	skb_push_rcsum(skb, nh_ofs);

out:
	tcf_action_update_bstats(&c->common, skb);
	if (defrag)
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	return retval;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = { .type = NLA_EXACT_LEN, .len = sizeof(struct tc_ct) },
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = { .type = NLA_EXACT_LEN,
				  .len = sizeof(struct in6_addr) },
	[TCA_CT_NAT_IPV6_MAX] = { .type = NLA_EXACT_LEN,
				  .len = sizeof(struct in6_addr) },
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};

static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
	struct nf_conntrack_zone zone;
	struct nf_conn *tmpl;
	int err;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	if (p->zone == NF_CT_DEFAULT_ZONE_ID)
		return 0;

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	nf_conntrack_get(&tmpl->ct_general);
	p->tmpl = tmpl;

	return 0;
}

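/* Netlink init/replace. New parameters are built outside the lock and
 * swapped in under tcf_lock via rcu_replace_pointer(); the old ones are
 * freed after a grace period through tcf_ct_params_free().
 */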
static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       int replace, int bind, bool rtnl_held,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!replace) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
	if (res == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	kfree(params);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_idr_search(tn, a, index);
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static struct tc_action_ops act_ct_ops = {
	.kind = "ct",
	.id = TCA_ID_CT,
	.owner = THIS_MODULE,
	.act = tcf_ct_act,
	.dump = tcf_ct_dump,
	.init = tcf_ct_init,
	.cleanup = tcf_ct_cleanup,
	.walk = tcf_ct_walker,
	.lookup = tcf_ct_search,
	.stats_update = tcf_stats_update,
	.size = sizeof(struct tcf_ct),
};

static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length\n");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, ct_net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id = &ct_net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <[email protected]>");
MODULE_AUTHOR("Yossi Kuperman <[email protected]>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <[email protected]>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");