// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>

#include <linux/dccp.h>
#include <linux/sctp.h>
#include <net/sctp/checksum.h>

#include <linux/netfilter.h>
#include <net/netfilter/nf_nat.h>

#include <linux/ipv6.h>
#include <linux/netfilter_ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#include <net/xfrm.h>
#include <net/ipv6.h>

#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static void nf_csum_update(struct sk_buff *skb,
			   unsigned int iphdroff, __sum16 *check,
			   const struct nf_conntrack_tuple *t,
			   enum nf_nat_manip_type maniptype);

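/* Rewrite the UDP source or destination port to match @tuple and, when
 * @do_csum is set, incrementally fix up the UDP checksum for both the
 * port and the pseudo-header address change.
 */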
static void
__udp_manip_pkt(struct sk_buff *skb,
		unsigned int iphdroff, struct udphdr *hdr,
		const struct nf_conntrack_tuple *tuple,
		enum nf_nat_manip_type maniptype, bool do_csum)
{
	__be16 *portptr, newport;

	if (maniptype == NF_NAT_MANIP_SRC) {
		/* Get rid of src port */
		newport = tuple->src.u.udp.port;
		portptr = &hdr->source;
	} else {
		/* Get rid of dst port */
		newport = tuple->dst.u.udp.port;
		portptr = &hdr->dest;
	}
	if (do_csum) {
		nf_csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
		inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
					 false);
		if (!hdr->check)
			hdr->check = CSUM_MANGLED_0;
	}
	*portptr = newport;
}

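/* UDP over IPv4 may legitimately carry a zero (disabled) checksum, so the
 * checksum is only touched when one is present or when checksum completion
 * has been left to hardware (CHECKSUM_PARTIAL).
 */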
static bool udp_manip_pkt(struct sk_buff *skb,
			  unsigned int iphdroff, unsigned int hdroff,
			  const struct nf_conntrack_tuple *tuple,
			  enum nf_nat_manip_type maniptype)
{
	struct udphdr *hdr;
	bool do_csum;

	if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
		return false;

	hdr = (struct udphdr *)(skb->data + hdroff);
	do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL;
	__udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, do_csum);
	return true;
}

static bool udplite_manip_pkt(struct sk_buff *skb,
			      unsigned int iphdroff, unsigned int hdroff,
			      const struct nf_conntrack_tuple *tuple,
			      enum nf_nat_manip_type maniptype)
{
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
	struct udphdr *hdr;

	if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
		return false;

	hdr = (struct udphdr *)(skb->data + hdroff);
	__udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, true);
#endif
	return true;
}

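/* SCTP uses a CRC32c checksum that cannot be updated incrementally; it is
 * recomputed over the whole packet once the ports have been rewritten.
 */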
static bool
sctp_manip_pkt(struct sk_buff *skb,
	       unsigned int iphdroff, unsigned int hdroff,
	       const struct nf_conntrack_tuple *tuple,
	       enum nf_nat_manip_type maniptype)
{
#ifdef CONFIG_NF_CT_PROTO_SCTP
	struct sctphdr *hdr;
	int hdrsize = 8;

	/* This could be an inner header returned in an icmp packet; in such
	 * cases we cannot update the checksum field since it is outside
	 * of the 8 bytes of transport layer headers we are guaranteed.
	 */
	if (skb->len >= hdroff + sizeof(*hdr))
		hdrsize = sizeof(*hdr);

	if (skb_ensure_writable(skb, hdroff + hdrsize))
		return false;

	hdr = (struct sctphdr *)(skb->data + hdroff);

	if (maniptype == NF_NAT_MANIP_SRC) {
		/* Get rid of src port */
		hdr->source = tuple->src.u.sctp.port;
	} else {
		/* Get rid of dst port */
		hdr->dest = tuple->dst.u.sctp.port;
	}

	if (hdrsize < sizeof(*hdr))
		return true;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		hdr->checksum = sctp_compute_cksum(skb, hdroff);
		skb->ip_summed = CHECKSUM_NONE;
	}
#endif
	return true;
}

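/* Rewrite the TCP port selected by @maniptype and update the checksum
 * incrementally for both the port and the pseudo-header addresses.
 */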
static bool
tcp_manip_pkt(struct sk_buff *skb,
	      unsigned int iphdroff, unsigned int hdroff,
	      const struct nf_conntrack_tuple *tuple,
	      enum nf_nat_manip_type maniptype)
{
	struct tcphdr *hdr;
	__be16 *portptr, newport, oldport;
	int hdrsize = 8; /* TCP connection tracking guarantees this much */

	/* This could be an inner header returned in an icmp packet; in such
	 * cases we cannot update the checksum field since it is outside of
	 * the 8 bytes of transport layer headers we are guaranteed.
	 */
	if (skb->len >= hdroff + sizeof(struct tcphdr))
		hdrsize = sizeof(struct tcphdr);

	if (skb_ensure_writable(skb, hdroff + hdrsize))
		return false;

	hdr = (struct tcphdr *)(skb->data + hdroff);

	if (maniptype == NF_NAT_MANIP_SRC) {
		/* Get rid of src port */
		newport = tuple->src.u.tcp.port;
		portptr = &hdr->source;
	} else {
		/* Get rid of dst port */
		newport = tuple->dst.u.tcp.port;
		portptr = &hdr->dest;
	}

	oldport = *portptr;
	*portptr = newport;

	if (hdrsize < sizeof(*hdr))
		return true;

	nf_csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
	inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, false);
	return true;
}

static bool
dccp_manip_pkt(struct sk_buff *skb,
	       unsigned int iphdroff, unsigned int hdroff,
	       const struct nf_conntrack_tuple *tuple,
	       enum nf_nat_manip_type maniptype)
{
#ifdef CONFIG_NF_CT_PROTO_DCCP
	struct dccp_hdr *hdr;
	__be16 *portptr, oldport, newport;
	int hdrsize = 8; /* DCCP connection tracking guarantees this much */

	if (skb->len >= hdroff + sizeof(struct dccp_hdr))
		hdrsize = sizeof(struct dccp_hdr);

	if (skb_ensure_writable(skb, hdroff + hdrsize))
		return false;

	hdr = (struct dccp_hdr *)(skb->data + hdroff);

	if (maniptype == NF_NAT_MANIP_SRC) {
		newport = tuple->src.u.dccp.port;
		portptr = &hdr->dccph_sport;
	} else {
		newport = tuple->dst.u.dccp.port;
		portptr = &hdr->dccph_dport;
	}

	oldport = *portptr;
	*portptr = newport;

	if (hdrsize < sizeof(*hdr))
		return true;

	nf_csum_update(skb, iphdroff, &hdr->dccph_checksum, tuple, maniptype);
	inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
				 false);
#endif
	return true;
}

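/* For ICMP queries the identifier plays the role of a port; only the
 * query/reply types below carry an identifier that can be rewritten.
 */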
static bool
icmp_manip_pkt(struct sk_buff *skb,
	       unsigned int iphdroff, unsigned int hdroff,
	       const struct nf_conntrack_tuple *tuple,
	       enum nf_nat_manip_type maniptype)
{
	struct icmphdr *hdr;

	if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
		return false;

	hdr = (struct icmphdr *)(skb->data + hdroff);
	switch (hdr->type) {
	case ICMP_ECHO:
	case ICMP_ECHOREPLY:
	case ICMP_TIMESTAMP:
	case ICMP_TIMESTAMPREPLY:
	case ICMP_INFO_REQUEST:
	case ICMP_INFO_REPLY:
	case ICMP_ADDRESS:
	case ICMP_ADDRESSREPLY:
		break;
	default:
		return true;
	}
	inet_proto_csum_replace2(&hdr->checksum, skb,
				 hdr->un.echo.id, tuple->src.u.icmp.id, false);
	hdr->un.echo.id = tuple->src.u.icmp.id;
	return true;
}

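/* ICMPv6 covers the IPv6 pseudo-header in its checksum, so the checksum
 * must be updated for the address change even when no identifier is
 * rewritten.
 */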
static bool
icmpv6_manip_pkt(struct sk_buff *skb,
		 unsigned int iphdroff, unsigned int hdroff,
		 const struct nf_conntrack_tuple *tuple,
		 enum nf_nat_manip_type maniptype)
{
	struct icmp6hdr *hdr;

	if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
		return false;

	hdr = (struct icmp6hdr *)(skb->data + hdroff);
	nf_csum_update(skb, iphdroff, &hdr->icmp6_cksum, tuple, maniptype);
	if (hdr->icmp6_type == ICMPV6_ECHO_REQUEST ||
	    hdr->icmp6_type == ICMPV6_ECHO_REPLY) {
		inet_proto_csum_replace2(&hdr->icmp6_cksum, skb,
					 hdr->icmp6_identifier,
					 tuple->src.u.icmp.id, false);
		hdr->icmp6_identifier = tuple->src.u.icmp.id;
	}
	return true;
}

/* manipulate a GRE packet according to maniptype */
static bool
gre_manip_pkt(struct sk_buff *skb,
	      unsigned int iphdroff, unsigned int hdroff,
	      const struct nf_conntrack_tuple *tuple,
	      enum nf_nat_manip_type maniptype)
{
#if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
	const struct gre_base_hdr *greh;
	struct pptp_gre_header *pgreh;

	/* pgreh includes two optional 32bit fields which are not required
	 * to be there.  That's where the magic '8' comes from */
	if (skb_ensure_writable(skb, hdroff + sizeof(*pgreh) - 8))
		return false;

	greh = (void *)skb->data + hdroff;
	pgreh = (struct pptp_gre_header *)greh;

	/* we only have destination manip of a packet, since 'source key'
	 * is not present in the packet itself */
	if (maniptype != NF_NAT_MANIP_DST)
		return true;

	switch (greh->flags & GRE_VERSION) {
	case GRE_VERSION_0:
		/* We do not currently NAT any GREv0 packets.
		 * Try to behave like "nf_nat_proto_unknown" */
		break;
	case GRE_VERSION_1:
		pr_debug("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key));
		pgreh->call_id = tuple->dst.u.gre.key;
		break;
	default:
		pr_debug("can't nat unknown GRE version\n");
		return false;
	}
#endif
	return true;
}

static bool l4proto_manip_pkt(struct sk_buff *skb,
			      unsigned int iphdroff, unsigned int hdroff,
			      const struct nf_conntrack_tuple *tuple,
			      enum nf_nat_manip_type maniptype)
{
	switch (tuple->dst.protonum) {
	case IPPROTO_TCP:
		return tcp_manip_pkt(skb, iphdroff, hdroff,
				     tuple, maniptype);
	case IPPROTO_UDP:
		return udp_manip_pkt(skb, iphdroff, hdroff,
				     tuple, maniptype);
	case IPPROTO_UDPLITE:
		return udplite_manip_pkt(skb, iphdroff, hdroff,
					 tuple, maniptype);
	case IPPROTO_SCTP:
		return sctp_manip_pkt(skb, iphdroff, hdroff,
				      tuple, maniptype);
	case IPPROTO_ICMP:
		return icmp_manip_pkt(skb, iphdroff, hdroff,
				      tuple, maniptype);
	case IPPROTO_ICMPV6:
		return icmpv6_manip_pkt(skb, iphdroff, hdroff,
					tuple, maniptype);
	case IPPROTO_DCCP:
		return dccp_manip_pkt(skb, iphdroff, hdroff,
				      tuple, maniptype);
	case IPPROTO_GRE:
		return gre_manip_pkt(skb, iphdroff, hdroff,
				     tuple, maniptype);
	}

	/* If we don't know protocol -- no error, pass it unmodified. */
	return true;
}

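/* Rewrite the IPv4 addresses (and the transport header) to match @target,
 * keeping the IP header checksum up to date.
 */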
static bool nf_nat_ipv4_manip_pkt(struct sk_buff *skb,
				  unsigned int iphdroff,
				  const struct nf_conntrack_tuple *target,
				  enum nf_nat_manip_type maniptype)
{
	struct iphdr *iph;
	unsigned int hdroff;

	if (skb_ensure_writable(skb, iphdroff + sizeof(*iph)))
		return false;

	iph = (void *)skb->data + iphdroff;
	hdroff = iphdroff + iph->ihl * 4;

	if (!l4proto_manip_pkt(skb, iphdroff, hdroff, target, maniptype))
		return false;
	iph = (void *)skb->data + iphdroff;

	if (maniptype == NF_NAT_MANIP_SRC) {
		csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
		iph->saddr = target->src.u3.ip;
	} else {
		csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
		iph->daddr = target->dst.u3.ip;
	}
	return true;
}

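/* Rewrite the IPv6 addresses to match @target. The transport header is
 * only touched on first fragments, after skipping any extension headers.
 */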
static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
				  unsigned int iphdroff,
				  const struct nf_conntrack_tuple *target,
				  enum nf_nat_manip_type maniptype)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6hdr *ipv6h;
	__be16 frag_off;
	int hdroff;
	u8 nexthdr;

	if (skb_ensure_writable(skb, iphdroff + sizeof(*ipv6h)))
		return false;

	ipv6h = (void *)skb->data + iphdroff;
	nexthdr = ipv6h->nexthdr;
	hdroff = ipv6_skip_exthdr(skb, iphdroff + sizeof(*ipv6h),
				  &nexthdr, &frag_off);
	if (hdroff < 0)
		goto manip_addr;

	if ((frag_off & htons(~0x7)) == 0 &&
	    !l4proto_manip_pkt(skb, iphdroff, hdroff, target, maniptype))
		return false;

	/* must reload, offset might have changed */
	ipv6h = (void *)skb->data + iphdroff;

manip_addr:
	if (maniptype == NF_NAT_MANIP_SRC)
		ipv6h->saddr = target->src.u3.in6;
	else
		ipv6h->daddr = target->dst.u3.in6;
#endif
	return true;
}

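/* Mangle @skb so that it looks like the inverse of the other direction's
 * tuple, i.e. like a reply to the (possibly already NATed) opposite flow.
 */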
unsigned int nf_nat_manip_pkt(struct sk_buff *skb, struct nf_conn *ct,
			      enum nf_nat_manip_type mtype,
			      enum ip_conntrack_dir dir)
{
	struct nf_conntrack_tuple target;

	/* We are aiming to look like inverse of other direction. */
	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (target.src.l3num) {
	case NFPROTO_IPV6:
		if (nf_nat_ipv6_manip_pkt(skb, 0, &target, mtype))
			return NF_ACCEPT;
		break;
	case NFPROTO_IPV4:
		if (nf_nat_ipv4_manip_pkt(skb, 0, &target, mtype))
			return NF_ACCEPT;
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return NF_DROP;
}

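/* Adjust a transport checksum for the pseudo-header change implied by
 * rewriting the layer 3 addresses.
 */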
static void nf_nat_ipv4_csum_update(struct sk_buff *skb,
				    unsigned int iphdroff, __sum16 *check,
				    const struct nf_conntrack_tuple *t,
				    enum nf_nat_manip_type maniptype)
{
	struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
	__be32 oldip, newip;

	if (maniptype == NF_NAT_MANIP_SRC) {
		oldip = iph->saddr;
		newip = t->src.u3.ip;
	} else {
		oldip = iph->daddr;
		newip = t->dst.u3.ip;
	}
	inet_proto_csum_replace4(check, skb, oldip, newip, true);
}

static void nf_nat_ipv6_csum_update(struct sk_buff *skb,
				    unsigned int iphdroff, __sum16 *check,
				    const struct nf_conntrack_tuple *t,
				    enum nf_nat_manip_type maniptype)
{
#if IS_ENABLED(CONFIG_IPV6)
	const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + iphdroff);
	const struct in6_addr *oldip, *newip;

	if (maniptype == NF_NAT_MANIP_SRC) {
		oldip = &ipv6h->saddr;
		newip = &t->src.u3.in6;
	} else {
		oldip = &ipv6h->daddr;
		newip = &t->dst.u3.in6;
	}
	inet_proto_csum_replace16(check, skb, oldip->s6_addr32,
				  newip->s6_addr32, true);
#endif
}

static void nf_csum_update(struct sk_buff *skb,
			   unsigned int iphdroff, __sum16 *check,
			   const struct nf_conntrack_tuple *t,
			   enum nf_nat_manip_type maniptype)
{
	switch (t->src.l3num) {
	case NFPROTO_IPV4:
		nf_nat_ipv4_csum_update(skb, iphdroff, check, t, maniptype);
		return;
	case NFPROTO_IPV6:
		nf_nat_ipv6_csum_update(skb, iphdroff, check, t, maniptype);
		return;
	}
}

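/* Recalculate a transport checksum after a payload length change: either
 * hand the final fold to checksum offload or update the length field
 * incrementally.
 */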
static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
				    u8 proto, void *data, __sum16 *check,
				    int datalen, int oldlen)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		const struct iphdr *iph = ip_hdr(skb);

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_headroom(skb) + skb_network_offset(skb) +
			ip_hdrlen(skb);
		skb->csum_offset = (void *)check - data;
		*check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
					    proto, 0);
	} else {
		inet_proto_csum_replace2(check, skb,
					 htons(oldlen), htons(datalen), true);
	}
}

#if IS_ENABLED(CONFIG_IPV6)
static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
				    u8 proto, void *data, __sum16 *check,
				    int datalen, int oldlen)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		const struct ipv6hdr *ipv6h = ipv6_hdr(skb);

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_headroom(skb) + skb_network_offset(skb) +
			(data - (void *)skb->data);
		skb->csum_offset = (void *)check - data;
		*check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					  datalen, proto, 0);
	} else {
		inet_proto_csum_replace2(check, skb,
					 htons(oldlen), htons(datalen), true);
	}
}
#endif

void nf_nat_csum_recalc(struct sk_buff *skb,
			u8 nfproto, u8 proto, void *data, __sum16 *check,
			int datalen, int oldlen)
{
	switch (nfproto) {
	case NFPROTO_IPV4:
		nf_nat_ipv4_csum_recalc(skb, proto, data, check,
					datalen, oldlen);
		return;
#if IS_ENABLED(CONFIG_IPV6)
	case NFPROTO_IPV6:
		nf_nat_ipv6_csum_recalc(skb, proto, data, check,
					datalen, oldlen);
		return;
#endif
	}

	WARN_ON_ONCE(1);
}

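/* Translate an ICMP error that relates to an existing connection: the
 * embedded (inner) header is mangled first, the ICMP checksum is
 * recomputed, and finally the outer IPv4 header is rewritten to look
 * like a reply to the original packet.
 */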
int nf_nat_icmp_reply_translation(struct sk_buff *skb,
				  struct nf_conn *ct,
				  enum ip_conntrack_info ctinfo,
				  unsigned int hooknum)
{
	struct {
		struct icmphdr icmp;
		struct iphdr ip;
	} *inside;
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
	unsigned int hdrlen = ip_hdrlen(skb);
	struct nf_conntrack_tuple target;
	unsigned long statusbit;

	WARN_ON(ctinfo != IP_CT_RELATED && ctinfo != IP_CT_RELATED_REPLY);

	if (skb_ensure_writable(skb, hdrlen + sizeof(*inside)))
		return 0;
	if (nf_ip_checksum(skb, hooknum, hdrlen, IPPROTO_ICMP))
		return 0;

	inside = (void *)skb->data + hdrlen;
	if (inside->icmp.type == ICMP_REDIRECT) {
		if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
			return 0;
		if (ct->status & IPS_NAT_MASK)
			return 0;
	}

	if (manip == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply direction */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	if (!(ct->status & statusbit))
		return 1;

	if (!nf_nat_ipv4_manip_pkt(skb, hdrlen + sizeof(inside->icmp),
				   &ct->tuplehash[!dir].tuple, !manip))
		return 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Reloading "inside" here since manip_pkt may reallocate */
		inside = (void *)skb->data + hdrlen;
		inside->icmp.checksum = 0;
		inside->icmp.checksum =
			csum_fold(skb_checksum(skb, hdrlen,
					       skb->len - hdrlen, 0));
	}

	/* Change outer to look like the reply to an incoming packet */
	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
	target.dst.protonum = IPPROTO_ICMP;
	if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip))
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);

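/* Common IPv4 hook body: RELATED ICMP errors get the special reply
 * translation above, everything else goes through the generic NAT engine.
 */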
static unsigned int
nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
	       const struct nf_hook_state *state)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return NF_ACCEPT;

	if (ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY) {
		if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   state->hook))
				return NF_DROP;
			else
				return NF_ACCEPT;
		}
	}

	return nf_nat_inet_fn(priv, skb, state);
}

static unsigned int
nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
	       const struct nf_hook_state *state)
{
	unsigned int ret;
	__be32 daddr = ip_hdr(skb)->daddr;

	ret = nf_nat_ipv4_fn(priv, skb, state);
	if (ret == NF_ACCEPT && daddr != ip_hdr(skb)->daddr)
		skb_dst_drop(skb);

	return ret;
}

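/* POST_ROUTING: if SNAT changed the addresses or ports, the packet may now
 * match an IPsec policy and has to be re-evaluated via nf_xfrm_me_harder().
 */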
static unsigned int
nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
		const struct nf_hook_state *state)
{
#ifdef CONFIG_XFRM
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	int err;
#endif
	unsigned int ret;

	ret = nf_nat_ipv4_fn(priv, skb, state);
#ifdef CONFIG_XFRM
	if (ret != NF_ACCEPT)
		return ret;

	if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
		return ret;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

		if (ct->tuplehash[dir].tuple.src.u3.ip !=
		    ct->tuplehash[!dir].tuple.dst.u3.ip ||
		    (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
		     ct->tuplehash[dir].tuple.src.u.all !=
		     ct->tuplehash[!dir].tuple.dst.u.all)) {
			err = nf_xfrm_me_harder(state->net, skb, AF_INET);
			if (err < 0)
				ret = NF_DROP_ERR(err);
		}
	}
#endif
	return ret;
}

static unsigned int
nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
		     const struct nf_hook_state *state)
{
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	unsigned int ret;
	int err;

	ret = nf_nat_ipv4_fn(priv, skb, state);
	if (ret != NF_ACCEPT)
		return ret;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

		if (ct->tuplehash[dir].tuple.dst.u3.ip !=
		    ct->tuplehash[!dir].tuple.src.u3.ip) {
			err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
			if (err < 0)
				ret = NF_DROP_ERR(err);
		}
#ifdef CONFIG_XFRM
		else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
			 ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
			 ct->tuplehash[dir].tuple.dst.u.all !=
			 ct->tuplehash[!dir].tuple.src.u.all) {
			err = nf_xfrm_me_harder(state->net, skb, AF_INET);
			if (err < 0)
				ret = NF_DROP_ERR(err);
		}
#endif
	}
	return ret;
}

static const struct nf_hook_ops nf_nat_ipv4_ops[] = {
	/* Before packet filtering, change destination */
	{
		.hook		= nf_nat_ipv4_in,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_PRE_ROUTING,
		.priority	= NF_IP_PRI_NAT_DST,
	},
	/* After packet filtering, change source */
	{
		.hook		= nf_nat_ipv4_out,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_POST_ROUTING,
		.priority	= NF_IP_PRI_NAT_SRC,
	},
	/* Before packet filtering, change destination */
	{
		.hook		= nf_nat_ipv4_local_fn,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_LOCAL_OUT,
		.priority	= NF_IP_PRI_NAT_DST,
	},
	/* After packet filtering, change source */
	{
		.hook		= nf_nat_ipv4_fn,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_LOCAL_IN,
		.priority	= NF_IP_PRI_NAT_SRC,
	},
};

int nf_nat_ipv4_register_fn(struct net *net, const struct nf_hook_ops *ops)
{
	return nf_nat_register_fn(net, ops->pf, ops, nf_nat_ipv4_ops,
				  ARRAY_SIZE(nf_nat_ipv4_ops));
}
EXPORT_SYMBOL_GPL(nf_nat_ipv4_register_fn);

void nf_nat_ipv4_unregister_fn(struct net *net, const struct nf_hook_ops *ops)
{
	nf_nat_unregister_fn(net, ops->pf, ops, ARRAY_SIZE(nf_nat_ipv4_ops));
}
EXPORT_SYMBOL_GPL(nf_nat_ipv4_unregister_fn);

#if IS_ENABLED(CONFIG_IPV6)
int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
				    struct nf_conn *ct,
				    enum ip_conntrack_info ctinfo,
				    unsigned int hooknum,
				    unsigned int hdrlen)
{
	struct {
		struct icmp6hdr icmp6;
		struct ipv6hdr ip6;
	} *inside;
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
	struct nf_conntrack_tuple target;
	unsigned long statusbit;

	WARN_ON(ctinfo != IP_CT_RELATED && ctinfo != IP_CT_RELATED_REPLY);

	if (skb_ensure_writable(skb, hdrlen + sizeof(*inside)))
		return 0;
	if (nf_ip6_checksum(skb, hooknum, hdrlen, IPPROTO_ICMPV6))
		return 0;

	inside = (void *)skb->data + hdrlen;
	if (inside->icmp6.icmp6_type == NDISC_REDIRECT) {
		if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
			return 0;
		if (ct->status & IPS_NAT_MASK)
			return 0;
	}

	if (manip == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply direction */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	if (!(ct->status & statusbit))
		return 1;

	if (!nf_nat_ipv6_manip_pkt(skb, hdrlen + sizeof(inside->icmp6),
				   &ct->tuplehash[!dir].tuple, !manip))
		return 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		struct ipv6hdr *ipv6h = ipv6_hdr(skb);

		inside = (void *)skb->data + hdrlen;
		inside->icmp6.icmp6_cksum = 0;
		inside->icmp6.icmp6_cksum =
			csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					skb->len - hdrlen, IPPROTO_ICMPV6,
					skb_checksum(skb, hdrlen,
						     skb->len - hdrlen, 0));
	}

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
	target.dst.protonum = IPPROTO_ICMPV6;
	if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip))
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation);

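/* Common IPv6 hook body: locate an ICMPv6 error behind any extension
 * headers and translate it, otherwise fall through to the generic NAT
 * engine.
 */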
static unsigned int
nf_nat_ipv6_fn(void *priv, struct sk_buff *skb,
	       const struct nf_hook_state *state)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	__be16 frag_off;
	int hdrlen;
	u8 nexthdr;

	ct = nf_ct_get(skb, &ctinfo);
	/* Can't track?  It's not due to stress, or conntrack would
	 * have dropped it.  Hence it's the user's responsibility to
	 * packet filter it out, or implement conntrack/NAT for that
	 * protocol. 8) --RR */
	if (!ct)
		return NF_ACCEPT;

	if (ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY) {
		nexthdr = ipv6_hdr(skb)->nexthdr;
		hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
					  &nexthdr, &frag_off);

		if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
			if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
							     state->hook,
							     hdrlen))
				return NF_DROP;
			else
				return NF_ACCEPT;
		}
	}

	return nf_nat_inet_fn(priv, skb, state);
}

static unsigned int
nf_nat_ipv6_in(void *priv, struct sk_buff *skb,
	       const struct nf_hook_state *state)
{
	unsigned int ret;
	struct in6_addr daddr = ipv6_hdr(skb)->daddr;

	ret = nf_nat_ipv6_fn(priv, skb, state);
	if (ret != NF_DROP && ret != NF_STOLEN &&
	    ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
		skb_dst_drop(skb);

	return ret;
}

static unsigned int
nf_nat_ipv6_out(void *priv, struct sk_buff *skb,
		const struct nf_hook_state *state)
{
#ifdef CONFIG_XFRM
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	int err;
#endif
	unsigned int ret;

	ret = nf_nat_ipv6_fn(priv, skb, state);
#ifdef CONFIG_XFRM
	if (ret != NF_ACCEPT)
		return ret;

	if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
		return ret;
	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

		if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
				      &ct->tuplehash[!dir].tuple.dst.u3) ||
		    (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
		     ct->tuplehash[dir].tuple.src.u.all !=
		     ct->tuplehash[!dir].tuple.dst.u.all)) {
			err = nf_xfrm_me_harder(state->net, skb, AF_INET6);
			if (err < 0)
				ret = NF_DROP_ERR(err);
		}
	}
#endif
	return ret;
}

static unsigned int
nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
		     const struct nf_hook_state *state)
{
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	unsigned int ret;
	int err;

	ret = nf_nat_ipv6_fn(priv, skb, state);
	if (ret != NF_ACCEPT)
		return ret;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

		if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
				      &ct->tuplehash[!dir].tuple.src.u3)) {
			err = nf_ip6_route_me_harder(state->net, skb);
			if (err < 0)
				ret = NF_DROP_ERR(err);
		}
#ifdef CONFIG_XFRM
		else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
			 ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
			 ct->tuplehash[dir].tuple.dst.u.all !=
			 ct->tuplehash[!dir].tuple.src.u.all) {
			err = nf_xfrm_me_harder(state->net, skb, AF_INET6);
			if (err < 0)
				ret = NF_DROP_ERR(err);
		}
#endif
	}
	return ret;
}

static const struct nf_hook_ops nf_nat_ipv6_ops[] = {
	/* Before packet filtering, change destination */
	{
		.hook		= nf_nat_ipv6_in,
		.pf		= NFPROTO_IPV6,
		.hooknum	= NF_INET_PRE_ROUTING,
		.priority	= NF_IP6_PRI_NAT_DST,
	},
	/* After packet filtering, change source */
	{
		.hook		= nf_nat_ipv6_out,
		.pf		= NFPROTO_IPV6,
		.hooknum	= NF_INET_POST_ROUTING,
		.priority	= NF_IP6_PRI_NAT_SRC,
	},
	/* Before packet filtering, change destination */
	{
		.hook		= nf_nat_ipv6_local_fn,
		.pf		= NFPROTO_IPV6,
		.hooknum	= NF_INET_LOCAL_OUT,
		.priority	= NF_IP6_PRI_NAT_DST,
	},
	/* After packet filtering, change source */
	{
		.hook		= nf_nat_ipv6_fn,
		.pf		= NFPROTO_IPV6,
		.hooknum	= NF_INET_LOCAL_IN,
		.priority	= NF_IP6_PRI_NAT_SRC,
	},
};

int nf_nat_ipv6_register_fn(struct net *net, const struct nf_hook_ops *ops)
{
	return nf_nat_register_fn(net, ops->pf, ops, nf_nat_ipv6_ops,
				  ARRAY_SIZE(nf_nat_ipv6_ops));
}
EXPORT_SYMBOL_GPL(nf_nat_ipv6_register_fn);

void nf_nat_ipv6_unregister_fn(struct net *net, const struct nf_hook_ops *ops)
{
	nf_nat_unregister_fn(net, ops->pf, ops, ARRAY_SIZE(nf_nat_ipv6_ops));
}
EXPORT_SYMBOL_GPL(nf_nat_ipv6_unregister_fn);
#endif /* CONFIG_IPV6 */

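/* NFPROTO_INET hooks are backed by both the IPv4 and the IPv6 hook sets;
 * the registration below wires up both and unwinds IPv6 if IPv4 fails.
 */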
#if defined(CONFIG_NF_TABLES_INET) && IS_ENABLED(CONFIG_NFT_NAT)
int nf_nat_inet_register_fn(struct net *net, const struct nf_hook_ops *ops)
{
	int ret;

	if (WARN_ON_ONCE(ops->pf != NFPROTO_INET))
		return -EINVAL;

	ret = nf_nat_register_fn(net, NFPROTO_IPV6, ops, nf_nat_ipv6_ops,
				 ARRAY_SIZE(nf_nat_ipv6_ops));
	if (ret)
		return ret;

	ret = nf_nat_register_fn(net, NFPROTO_IPV4, ops, nf_nat_ipv4_ops,
				 ARRAY_SIZE(nf_nat_ipv4_ops));
	if (ret)
		nf_nat_ipv6_unregister_fn(net, ops);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_inet_register_fn);

void nf_nat_inet_unregister_fn(struct net *net, const struct nf_hook_ops *ops)
{
	nf_nat_unregister_fn(net, NFPROTO_IPV4, ops, ARRAY_SIZE(nf_nat_ipv4_ops));
	nf_nat_unregister_fn(net, NFPROTO_IPV6, ops, ARRAY_SIZE(nf_nat_ipv6_ops));
}
EXPORT_SYMBOL_GPL(nf_nat_inet_unregister_fn);
#endif /* NFT INET NAT */