2 * Stateless NAT actions
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
12 #include <linux/errno.h>
13 #include <linux/init.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/netfilter.h>
17 #include <linux/rtnetlink.h>
18 #include <linux/skbuff.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/string.h>
22 #include <linux/tc_act/tc_nat.h>
23 #include <net/act_api.h>
24 #include <net/pkt_cls.h>
27 #include <net/netlink.h>
28 #include <net/tc_act/tc_nat.h>
/* Per-netns id under which this action's tc_action_net is registered. */
33 static unsigned int nat_net_id;
/* Forward declaration; the ops table is defined near the bottom of the file. */
34 static struct tc_action_ops act_nat_ops;
/* Netlink attribute policy: TCA_NAT_PARMS must carry a struct tc_nat.
 * NOTE(review): the initializer's closing brace is not visible in this
 * excerpt — lines appear to have been elided.
 */
36 static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
37 [TCA_NAT_PARMS] = { .len = sizeof(struct tc_nat) },
/*
 * tcf_nat_init() - netlink ->init() handler: parse TCA_NAT_* attributes and
 * create a new NAT action instance or update an existing one.
 *
 * NOTE(review): this excerpt is elided — the opening/closing braces, the
 * declarations of err/ret/parm/p, and several error checks between the
 * visible lines are missing. Comments below describe only what is visible.
 */
40 static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
41 struct tc_action **a, int ovr, int bind,
42 bool rtnl_held, struct tcf_proto *tp,
43 struct netlink_ext_ack *extack)
/* Per-netns idr/table for actions of this kind. */
45 struct tc_action_net *tn = net_generic(net, nat_net_id);
46 struct nlattr *tb[TCA_NAT_MAX + 1];
47 struct tcf_chain *goto_ch = NULL;
/* Split and validate the nested attributes against nat_policy. */
55 err = nla_parse_nested_deprecated(tb, TCA_NAT_MAX, nla, nat_policy,
/* TCA_NAT_PARMS is mandatory. */
60 if (tb[TCA_NAT_PARMS] == NULL)
62 parm = nla_data(tb[TCA_NAT_PARMS]);
/* Look up an existing action by index or reserve the index for creation. */
64 err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
66 ret = tcf_idr_create(tn, parm->index, est, a,
67 &act_nat_ops, bind, false);
/* Creation failed: release the index reserved above. */
69 tcf_idr_cleanup(tn, parm->index);
/* Existing action and no replace requested: drop the reference taken. */
77 tcf_idr_release(*a, bind);
/* Resolve the goto_chain control verdict before committing parameters. */
83 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
/* Commit the new parameters under tcf_lock so the datapath sees a
 * consistent old/new address pair and flags.
 */
88 spin_lock_bh(&p->tcf_lock);
89 p->old_addr = parm->old_addr;
90 p->new_addr = parm->new_addr;
92 p->flags = parm->flags;
94 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
95 spin_unlock_bh(&p->tcf_lock);
/* Drop the reference on any chain displaced by tcf_action_set_ctrlact(). */
97 tcf_chain_put_by_act(goto_ch);
/* A freshly created action must be inserted into the idr to go live. */
99 if (ret == ACT_P_CREATED)
100 tcf_idr_insert(tn, *a);
/* Error path (label elided): release the action reference. */
104 tcf_idr_release(*a, bind);
/*
 * tcf_nat_act() - datapath handler: rewrite the IPv4 source (egress) or
 * destination (ingress) address and incrementally fix up the affected
 * IP/TCP/UDP/ICMP checksums.
 *
 * NOTE(review): this excerpt is elided — the declarations of iph/addr/mask/
 * noff/ihl, the opening/closing braces, the drop/out labels and the return
 * statements are not visible. Comments describe only the visible lines.
 */
108 static int tcf_nat_act(struct sk_buff *skb, const struct tc_action *a,
109 struct tcf_result *res)
111 struct tcf_nat *p = to_tcf_nat(a);
/* Snapshot the parameters and update stats under the per-action lock. */
122 spin_lock(&p->tcf_lock);
124 tcf_lastuse_update(&p->tcf_tm);
125 old_addr = p->old_addr;
126 new_addr = p->new_addr;
128 egress = p->flags & TCA_NAT_FLAG_EGRESS;
129 action = p->tcf_action;
131 bstats_update(&p->tcf_bstats, skb);
133 spin_unlock(&p->tcf_lock);
135 if (unlikely(action == TC_ACT_SHOT))
/* Make sure the IPv4 header is in the linear part of the skb. */
138 noff = skb_network_offset(skb);
139 if (!pskb_may_pull(skb, sizeof(*iph) + noff))
/* Rewrite only when the masked address matches old_addr. */
149 if (!((old_addr ^ addr) & mask)) {
150 if (skb_try_make_writable(skb, sizeof(*iph) + noff))
/* Preserve the host bits that fall outside the mask. */
154 new_addr |= addr & ~mask;
156 /* Rewrite IP header */
159 iph->saddr = new_addr;
161 iph->daddr = new_addr;
/* Incrementally patch the IPv4 header checksum (old -> new addr). */
163 csum_replace4(&iph->check, addr, new_addr);
164 } else if ((iph->frag_off & htons(IP_OFFSET)) ||
165 iph->protocol != IPPROTO_ICMP) {
171 /* It would be nice to share code with stateful NAT. */
/* Non-first fragments carry no L4 header, so skip the L4 fixup
 * (the 0 selector falls through to the switch default, elided here).
 */
172 switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
/* TCP: patch the pseudo-header contribution to the checksum. */
177 if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
178 skb_try_make_writable(skb, ihl + sizeof(*tcph) + noff))
181 tcph = (void *)(skb_network_header(skb) + ihl);
182 inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr,
/* UDP: checksum 0 means "not computed"; only patch a present
 * checksum (or one being completed via CHECKSUM_PARTIAL).
 */
190 if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
191 skb_try_make_writable(skb, ihl + sizeof(*udph) + noff))
194 udph = (void *)(skb_network_header(skb) + ihl);
195 if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
196 inet_proto_csum_replace4(&udph->check, skb, addr,
/* Per RFC 768, a computed checksum of zero is sent as all-ones. */
199 udph->check = CSUM_MANGLED_0;
/* ICMP errors embed the offending IP header; rewrite the inner
 * address as the mirror image of the outer rewrite.
 */
205 struct icmphdr *icmph;
207 if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + noff))
210 icmph = (void *)(skb_network_header(skb) + ihl);
/* Only error messages carry an embedded IP header worth fixing. */
212 if ((icmph->type != ICMP_DEST_UNREACH) &&
213 (icmph->type != ICMP_TIME_EXCEEDED) &&
214 (icmph->type != ICMP_PARAMETERPROB))
/* Pull the embedded IP header into the linear area as well. */
217 if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) +
/* Re-derive pointers: pskb_may_pull may have relocated the data. */
221 icmph = (void *)(skb_network_header(skb) + ihl);
222 iph = (void *)(icmph + 1);
228 if ((old_addr ^ addr) & mask)
231 if (skb_try_make_writable(skb, ihl + sizeof(*icmph) +
232 sizeof(*iph) + noff))
/* Pointers must be re-derived again after making the skb writable. */
235 icmph = (void *)(skb_network_header(skb) + ihl);
236 iph = (void *)(icmph + 1);
239 new_addr |= addr & ~mask;
241 /* XXX Fix up the inner checksums. */
243 iph->daddr = new_addr;
245 iph->saddr = new_addr;
247 inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
/* Drop path (label elided): account the drop under the lock. */
259 spin_lock(&p->tcf_lock);
260 p->tcf_qstats.drops++;
261 spin_unlock(&p->tcf_lock);
/*
 * tcf_nat_dump() - fill a netlink message with this action's parameters
 * and timing info.
 *
 * NOTE(review): this excerpt is elided — the bind/ref parameters, the
 * declaration of the tcf_t variable `t`, the return statements and the
 * nla_put_failure label are not all visible.
 */
265 static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
/* Remember the tail pointer so the message can be trimmed on failure. */
268 unsigned char *b = skb_tail_pointer(skb);
269 struct tcf_nat *p = to_tcf_nat(a);
270 struct tc_nat opt = {
271 .index = p->tcf_index,
/* Counts are reported net of the references the caller itself holds. */
272 .refcnt = refcount_read(&p->tcf_refcnt) - ref,
273 .bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
/* Snapshot the mutable fields under the action lock. */
277 spin_lock_bh(&p->tcf_lock);
278 opt.old_addr = p->old_addr;
279 opt.new_addr = p->new_addr;
281 opt.flags = p->flags;
282 opt.action = p->tcf_action;
284 if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
285 goto nla_put_failure;
/* Dump install/lastuse/expires timestamps. */
287 tcf_tm_dump(&t, &p->tcf_tm);
288 if (nla_put_64bit(skb, TCA_NAT_TM, sizeof(t), &t, TCA_NAT_PAD))
289 goto nla_put_failure;
290 spin_unlock_bh(&p->tcf_lock);
/* nla_put_failure path: unlock before trimming the message (elided). */
295 spin_unlock_bh(&p->tcf_lock);
/* ->walk() callback: iterate all NAT actions in @net via the generic
 * idr walker. NOTE(review): braces elided in this excerpt.
 */
300 static int tcf_nat_walker(struct net *net, struct sk_buff *skb,
301 struct netlink_callback *cb, int type,
302 const struct tc_action_ops *ops,
303 struct netlink_ext_ack *extack)
305 struct tc_action_net *tn = net_generic(net, nat_net_id);
307 return tcf_generic_walker(tn, skb, cb, type, ops, extack);
/* ->lookup() callback: find a NAT action by its idr index in @net.
 * NOTE(review): braces elided in this excerpt.
 */
310 static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index)
312 struct tc_action_net *tn = net_generic(net, nat_net_id);
314 return tcf_idr_search(tn, a, index);
/* Action ops table wiring this module's callbacks into the tc action
 * framework. NOTE(review): several initializers (e.g. the action kind
 * string and the ->act callback) and the closing brace are not visible
 * in this excerpt.
 */
317 static struct tc_action_ops act_nat_ops = {
320 .owner = THIS_MODULE,
322 .dump = tcf_nat_dump,
323 .init = tcf_nat_init,
324 .walk = tcf_nat_walker,
325 .lookup = tcf_nat_search,
326 .size = sizeof(struct tcf_nat),
/* Per-netns init: set up this action kind's table in the new namespace.
 * NOTE(review): braces elided in this excerpt.
 */
329 static __net_init int nat_init_net(struct net *net)
331 struct tc_action_net *tn = net_generic(net, nat_net_id);
333 return tc_action_net_init(tn, &act_nat_ops);
/* Per-netns batched exit: tear down the action tables for all namespaces
 * in @net_list. NOTE(review): braces elided in this excerpt.
 */
336 static void __net_exit nat_exit_net(struct list_head *net_list)
338 tc_action_net_exit(net_list, nat_net_id);
/* Pernet registration: .size makes net_generic() allocate a
 * tc_action_net per namespace; .id (elided here, presumably
 * &nat_net_id — confirm against full source) keys the lookup.
 */
341 static struct pernet_operations nat_net_ops = {
342 .init = nat_init_net,
343 .exit_batch = nat_exit_net,
345 .size = sizeof(struct tc_action_net),
348 MODULE_DESCRIPTION("Stateless NAT actions");
349 MODULE_LICENSE("GPL");
/* Module entry: register the action ops and pernet hooks.
 * NOTE(review): braces elided in this excerpt.
 */
351 static int __init nat_init_module(void)
353 return tcf_register_action(&act_nat_ops, &nat_net_ops);
/* Module exit: unregister the action ops and pernet hooks.
 * NOTE(review): braces elided in this excerpt.
 */
356 static void __exit nat_cleanup_module(void)
358 tcf_unregister_action(&act_nat_ops, &nat_net_ops);
361 module_init(nat_init_module);
362 module_exit(nat_cleanup_module);