2 * IPv6 tunneling device
3 * Linux INET6 implementation
10 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 #include <linux/module.h>
24 #include <linux/capability.h>
25 #include <linux/errno.h>
26 #include <linux/types.h>
27 #include <linux/sockios.h>
28 #include <linux/icmp.h>
32 #include <linux/if_tunnel.h>
33 #include <linux/net.h>
34 #include <linux/in6.h>
35 #include <linux/netdevice.h>
36 #include <linux/if_arp.h>
37 #include <linux/icmpv6.h>
38 #include <linux/init.h>
39 #include <linux/route.h>
40 #include <linux/rtnetlink.h>
41 #include <linux/netfilter_ipv6.h>
42 #include <linux/slab.h>
43 #include <linux/hash.h>
45 #include <asm/uaccess.h>
46 #include <linux/atomic.h>
51 #include <net/ip6_route.h>
52 #include <net/addrconf.h>
53 #include <net/ip6_tunnel.h>
55 #include <net/dsfield.h>
56 #include <net/inet_ecn.h>
57 #include <net/net_namespace.h>
58 #include <net/netns/generic.h>
60 MODULE_AUTHOR("Ville Nuorvala");
61 MODULE_DESCRIPTION("IPv6 tunneling device");
62 MODULE_LICENSE("GPL");
63 MODULE_ALIAS_NETDEV("ip6tnl0");
66 #define IP6_TNL_TRACE(x...) pr_debug("%s:" x "\n", __func__)
68 #define IP6_TNL_TRACE(x...) do {;} while(0)
71 #define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
72 #define IPV6_TCLASS_SHIFT 20
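/*
 * Illustrative sketch (not part of the original source): the masks above
 * operate on the big-endian IPv6 flowinfo word, in which the traffic class
 * occupies bits 20-27. Extracting the traffic class, or folding an IPv4
 * tos byte into a flow label template (as ip4ip6_tnl_xmit() does below),
 * looks roughly like this:
 *
 *	__u8   tclass    = ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT;
 *	__be32 flowlabel = htonl((__u32)tos << IPV6_TCLASS_SHIFT);
 */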
74 #define HASH_SIZE_SHIFT 5
75 #define HASH_SIZE (1 << HASH_SIZE_SHIFT)
77 static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
79 u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
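/* hash_32() folds the XOR of the two addresses down to HASH_SIZE_SHIFT
 * bits, giving the index of a bucket in tnls_r_l[] */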
81 return hash_32(hash, HASH_SIZE_SHIFT);
84 static int ip6_tnl_dev_init(struct net_device *dev);
85 static void ip6_tnl_dev_setup(struct net_device *dev);
87 static int ip6_tnl_net_id __read_mostly;
89 /* the IPv6 tunnel fallback device */
90 struct net_device *fb_tnl_dev;
91 /* lists for storing tunnels in use */
92 struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
93 struct ip6_tnl __rcu *tnls_wc[1];
94 struct ip6_tnl __rcu **tnls[2];
97 /* often modified stats are per cpu, others are shared (netdev->stats) */
99 unsigned long rx_packets;
100 unsigned long rx_bytes;
101 unsigned long tx_packets;
102 unsigned long tx_bytes;
103 } __attribute__((aligned(4*sizeof(unsigned long))));
105 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
107 struct pcpu_tstats sum = { 0 };
110 for_each_possible_cpu(i) {
111 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
113 sum.rx_packets += tstats->rx_packets;
114 sum.rx_bytes += tstats->rx_bytes;
115 sum.tx_packets += tstats->tx_packets;
116 sum.tx_bytes += tstats->tx_bytes;
118 dev->stats.rx_packets = sum.rx_packets;
119 dev->stats.rx_bytes = sum.rx_bytes;
120 dev->stats.tx_packets = sum.tx_packets;
121 dev->stats.tx_bytes = sum.tx_bytes;
126 * Locking: hash tables are protected by RCU and RTNL
129 static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
131 struct dst_entry *dst = t->dst_cache;
133 if (dst && dst->obsolete &&
134 dst->ops->check(dst, t->dst_cookie) == NULL) {
143 static inline void ip6_tnl_dst_reset(struct ip6_tnl *t)
145 dst_release(t->dst_cache);
149 static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
151 struct rt6_info *rt = (struct rt6_info *) dst;
152 t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
153 dst_release(t->dst_cache);
158 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
159 * @remote: the address of the tunnel exit-point
160 * @local: the address of the tunnel entry-point
163 * tunnel matching given end-points if found,
164 * else fallback tunnel if its device is up,
168 #define for_each_ip6_tunnel_rcu(start) \
169 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
171 static struct ip6_tnl *
172 ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
174 unsigned int hash = HASH(remote, local);
176 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
178 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
179 if (ipv6_addr_equal(local, &t->parms.laddr) &&
180 ipv6_addr_equal(remote, &t->parms.raddr) &&
181 (t->dev->flags & IFF_UP))
184 t = rcu_dereference(ip6n->tnls_wc[0]);
185 if (t && (t->dev->flags & IFF_UP))
192 * ip6_tnl_bucket - get head of list matching given tunnel parameters
193 * @p: parameters containing tunnel end-points
196 * ip6_tnl_bucket() returns the head of the list matching the
197 * &struct in6_addr entries laddr and raddr in @p.
199 * Return: head of IPv6 tunnel list
202 static struct ip6_tnl __rcu **
203 ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
205 const struct in6_addr *remote = &p->raddr;
206 const struct in6_addr *local = &p->laddr;
210 if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
212 h = HASH(remote, local);
214 return &ip6n->tnls[prio][h];
218 * ip6_tnl_link - add tunnel to hash table
219 * @t: tunnel to be added
223 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
225 struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
227 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
228 rcu_assign_pointer(*tp, t);
232 * ip6_tnl_unlink - remove tunnel from hash table
233 * @t: tunnel to be removed
237 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
239 struct ip6_tnl __rcu **tp;
240 struct ip6_tnl *iter;
242 for (tp = ip6_tnl_bucket(ip6n, &t->parms);
243 (iter = rtnl_dereference(*tp)) != NULL;
246 rcu_assign_pointer(*tp, t->next);
252 static void ip6_dev_free(struct net_device *dev)
254 free_percpu(dev->tstats);
259 * ip6_tnl_create - create a new tunnel
260 * @p: tunnel parameters
261 * @net: network namespace the tunnel is created in
264 * Create tunnel matching given parameters.
267 * created tunnel or NULL
270 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
272 struct net_device *dev;
276 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
279 strlcpy(name, p->name, IFNAMSIZ);
281 sprintf(name, "ip6tnl%%d");
283 dev = alloc_netdev(sizeof (*t), name, ip6_tnl_dev_setup);
287 dev_net_set(dev, net);
289 t = netdev_priv(dev);
291 err = ip6_tnl_dev_init(dev);
295 if ((err = register_netdevice(dev)) < 0)
298 strcpy(t->parms.name, dev->name);
301 ip6_tnl_link(ip6n, t);
311 * ip6_tnl_locate - find or create tunnel matching given parameters
312 * @p: tunnel parameters
313 * @create: != 0 if a new tunnel may be created when no match is found
316 * ip6_tnl_locate() first tries to locate an existing tunnel
317 * based on @p. If this is unsuccessful, but @create is set, a new
318 * tunnel device is created and registered for use.
321 * matching tunnel or NULL
324 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
325 struct ip6_tnl_parm *p, int create)
327 const struct in6_addr *remote = &p->raddr;
328 const struct in6_addr *local = &p->laddr;
329 struct ip6_tnl __rcu **tp;
331 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
333 for (tp = ip6_tnl_bucket(ip6n, p);
334 (t = rtnl_dereference(*tp)) != NULL;
336 if (ipv6_addr_equal(local, &t->parms.laddr) &&
337 ipv6_addr_equal(remote, &t->parms.raddr))
342 return ip6_tnl_create(net, p);
346 * ip6_tnl_dev_uninit - tunnel device uninitializer
347 * @dev: the device to be destroyed
350 * ip6_tnl_dev_uninit() removes tunnel from its list
354 ip6_tnl_dev_uninit(struct net_device *dev)
356 struct ip6_tnl *t = netdev_priv(dev);
357 struct net *net = dev_net(dev);
358 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
360 if (dev == ip6n->fb_tnl_dev)
361 RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
363 ip6_tnl_unlink(ip6n, t);
364 ip6_tnl_dst_reset(t);
369 * parse_tlv_tnl_enc_lim - handle encapsulation limit option
370 * @skb: received socket buffer
373 * 0 if none was found,
374 * else index to encapsulation limit
378 parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
380 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
381 __u8 nexthdr = ipv6h->nexthdr;
382 __u16 off = sizeof (*ipv6h);
384 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
386 struct ipv6_opt_hdr *hdr;
387 if (raw + off + sizeof (*hdr) > skb->data &&
388 !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
391 hdr = (struct ipv6_opt_hdr *) (raw + off);
392 if (nexthdr == NEXTHDR_FRAGMENT) {
393 struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
394 if (frag_hdr->frag_off)
397 } else if (nexthdr == NEXTHDR_AUTH) {
398 optlen = (hdr->hdrlen + 2) << 2;
400 optlen = ipv6_optlen(hdr);
402 if (nexthdr == NEXTHDR_DEST) {
405 struct ipv6_tlv_tnl_enc_lim *tel;
407 /* No more room for encapsulation limit */
408 if (i + sizeof (*tel) > off + optlen)
411 tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
412 /* return index of option if found and valid */
413 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
416 /* else jump to next option */
418 i += tel->length + 2;
423 nexthdr = hdr->nexthdr;
430 * ip6_tnl_err - tunnel error handler
433 * ip6_tnl_err() should handle errors in the tunnel according
434 * to the specifications in RFC 2473.
438 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
439 u8 *type, u8 *code, int *msg, __u32 *info, int offset)
441 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
444 u8 rel_type = ICMPV6_DEST_UNREACH;
445 u8 rel_code = ICMPV6_ADDR_UNREACH;
450 /* If the packet doesn't contain the original IPv6 header, we are
451 in trouble since we might need the source address for further
452 processing of the error. */
455 if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr,
456 &ipv6h->saddr)) == NULL)
459 if (t->parms.proto != ipproto && t->parms.proto != 0)
466 struct ipv6_tlv_tnl_enc_lim *tel;
468 case ICMPV6_DEST_UNREACH:
469 net_warn_ratelimited("%s: Path to destination invalid or inactive!\n",
473 case ICMPV6_TIME_EXCEED:
474 if ((*code) == ICMPV6_EXC_HOPLIMIT) {
475 net_warn_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
480 case ICMPV6_PARAMPROB:
482 if ((*code) == ICMPV6_HDR_FIELD)
483 teli = parse_tlv_tnl_enc_lim(skb, skb->data);
485 if (teli && teli == *info - 2) {
486 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
487 if (tel->encap_limit == 0) {
488 net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
493 net_warn_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
497 case ICMPV6_PKT_TOOBIG:
498 mtu = *info - offset;
499 if (mtu < IPV6_MIN_MTU)
503 if ((len = sizeof (*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) {
504 rel_type = ICMPV6_PKT_TOOBIG;
523 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
524 u8 type, u8 code, int offset, __be32 info)
529 __u32 rel_info = ntohl(info);
531 struct sk_buff *skb2;
532 const struct iphdr *eiph;
536 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
537 &rel_msg, &rel_info, offset);
545 case ICMPV6_DEST_UNREACH:
546 if (rel_code != ICMPV6_ADDR_UNREACH)
548 rel_type = ICMP_DEST_UNREACH;
549 rel_code = ICMP_HOST_UNREACH;
551 case ICMPV6_PKT_TOOBIG:
554 rel_type = ICMP_DEST_UNREACH;
555 rel_code = ICMP_FRAG_NEEDED;
558 rel_type = ICMP_REDIRECT;
559 rel_code = ICMP_REDIR_HOST;
564 if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
567 skb2 = skb_clone(skb, GFP_ATOMIC);
573 skb_pull(skb2, offset);
574 skb_reset_network_header(skb2);
577 /* Try to guess incoming interface */
578 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
581 IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
585 skb2->dev = rt->dst.dev;
587 /* route "incoming" packet */
588 if (rt->rt_flags & RTCF_LOCAL) {
591 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
592 eiph->daddr, eiph->saddr,
595 RT_TOS(eiph->tos), 0);
597 rt->dst.dev->type != ARPHRD_TUNNEL) {
602 skb_dst_set(skb2, &rt->dst);
605 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
607 skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
611 /* change mtu on this route */
612 if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
613 if (rel_info > dst_mtu(skb_dst(skb2)))
616 skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
618 if (rel_type == ICMP_REDIRECT)
619 skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);
621 icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
629 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
630 u8 type, u8 code, int offset, __be32 info)
635 __u32 rel_info = ntohl(info);
638 err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
639 &rel_msg, &rel_info, offset);
643 if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
645 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
651 skb_pull(skb2, offset);
652 skb_reset_network_header(skb2);
654 /* Try to guess incoming interface */
655 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
658 if (rt && rt->dst.dev)
659 skb2->dev = rt->dst.dev;
661 icmpv6_send(skb2, rel_type, rel_code, rel_info);
664 dst_release(&rt->dst);
672 static void ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
673 const struct ipv6hdr *ipv6h,
676 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
678 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
679 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
681 if (INET_ECN_is_ce(dsfield))
682 IP_ECN_set_ce(ip_hdr(skb));
685 static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
686 const struct ipv6hdr *ipv6h,
689 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
690 ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
692 if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h)))
693 IP6_ECN_set_ce(ipv6_hdr(skb));
696 static __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
697 const struct in6_addr *laddr,
698 const struct in6_addr *raddr)
700 struct ip6_tnl_parm *p = &t->parms;
701 int ltype = ipv6_addr_type(laddr);
702 int rtype = ipv6_addr_type(raddr);
705 if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
706 flags = IP6_TNL_F_CAP_PER_PACKET;
707 } else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
708 rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
709 !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
710 (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
711 if (ltype&IPV6_ADDR_UNICAST)
712 flags |= IP6_TNL_F_CAP_XMIT;
713 if (rtype&IPV6_ADDR_UNICAST)
714 flags |= IP6_TNL_F_CAP_RCV;
719 /* called with rcu_read_lock() */
720 static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
721 const struct in6_addr *laddr,
722 const struct in6_addr *raddr)
724 struct ip6_tnl_parm *p = &t->parms;
726 struct net *net = dev_net(t->dev);
728 if ((p->flags & IP6_TNL_F_CAP_RCV) ||
729 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
730 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
731 struct net_device *ldev = NULL;
734 ldev = dev_get_by_index_rcu(net, p->link);
736 if ((ipv6_addr_is_multicast(laddr) ||
737 likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
738 likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
745 * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally
746 * @skb: received socket buffer
747 * @protocol: ethernet protocol ID
748 * @dscp_ecn_decapsulate: the function to decapsulate DSCP code and ECN
753 static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
755 void (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
756 const struct ipv6hdr *ipv6h,
757 struct sk_buff *skb))
760 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
764 if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
765 &ipv6h->daddr)) != NULL) {
766 struct pcpu_tstats *tstats;
768 if (t->parms.proto != ipproto && t->parms.proto != 0) {
773 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
778 if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
779 t->dev->stats.rx_dropped++;
784 skb->mac_header = skb->network_header;
785 skb_reset_network_header(skb);
786 skb->protocol = htons(protocol);
787 skb->pkt_type = PACKET_HOST;
788 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
790 tstats = this_cpu_ptr(t->dev->tstats);
791 tstats->rx_packets++;
792 tstats->rx_bytes += skb->len;
794 __skb_tunnel_rx(skb, t->dev);
796 dscp_ecn_decapsulate(t, ipv6h, skb);
811 static int ip4ip6_rcv(struct sk_buff *skb)
813 return ip6_tnl_rcv(skb, ETH_P_IP, IPPROTO_IPIP,
814 ip4ip6_dscp_ecn_decapsulate);
817 static int ip6ip6_rcv(struct sk_buff *skb)
819 return ip6_tnl_rcv(skb, ETH_P_IPV6, IPPROTO_IPV6,
820 ip6ip6_dscp_ecn_decapsulate);
823 struct ipv6_tel_txoption {
824 struct ipv6_txoptions ops;
828 static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
830 memset(opt, 0, sizeof(struct ipv6_tel_txoption));
832 opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
834 opt->dst_opt[4] = encap_limit;
835 opt->dst_opt[5] = IPV6_TLV_PADN;
838 opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
839 opt->ops.opt_nflen = 8;
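/*
 * For reference, the tunnel encapsulation limit option built here is an
 * 8-byte destination options header (RFC 2473, section 4.1.1):
 *
 *	byte 0-1  next header / hdr ext len (8 bytes total, so the len field is 0)
 *	byte 2-4  TLV: type IPV6_TLV_TNL_ENCAP_LIMIT, length 1, value = encap_limit
 *	byte 5-7  PadN option padding the header out to 8 bytes
 *
 * opt_nflen covers those 8 bytes of non-fragmentable option space;
 * ip6_tnl_xmit2() pushes the header via ipv6_push_nfrag_opts() whenever
 * an encapsulation limit is in use.
 */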
843 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
844 * @t: the outgoing tunnel device
845 * @hdr: IPv6 header from the incoming packet
848 * Avoid trivial tunneling loop by checking that tunnel exit-point
849 * doesn't match source of incoming packet.
857 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
859 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
862 static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
864 struct ip6_tnl_parm *p = &t->parms;
866 struct net *net = dev_net(t->dev);
868 if (p->flags & IP6_TNL_F_CAP_XMIT) {
869 struct net_device *ldev = NULL;
873 ldev = dev_get_by_index_rcu(net, p->link);
875 if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0)))
876 pr_warn("%s xmit: Local address not yet configured!\n",
878 else if (!ipv6_addr_is_multicast(&p->raddr) &&
879 unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0)))
880 pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
889 * ip6_tnl_xmit2 - encapsulate packet and send
890 * @skb: the outgoing socket buffer
891 * @dev: the outgoing tunnel device
892 * @dsfield: dscp code for outer header
893 * @fl: flow of tunneled packet
894 * @encap_limit: encapsulation limit
895 * @pmtu: Path MTU is stored if packet is too big
898 * Build new header and do some sanity checks on the packet before sending
904 * %-EMSGSIZE if the message is too big; the path MTU is stored in @pmtu in this case.
907 static int ip6_tnl_xmit2(struct sk_buff *skb,
908 struct net_device *dev,
914 struct net *net = dev_net(dev);
915 struct ip6_tnl *t = netdev_priv(dev);
916 struct net_device_stats *stats = &t->dev->stats;
917 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
918 struct ipv6_tel_txoption opt;
919 struct dst_entry *dst = NULL, *ndst = NULL;
920 struct net_device *tdev;
922 unsigned int max_headroom = sizeof(struct ipv6hdr);
927 if (!fl6->flowi6_mark)
928 dst = ip6_tnl_dst_check(t);
930 ndst = ip6_route_output(net, NULL, fl6);
933 goto tx_err_link_failure;
934 ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
938 goto tx_err_link_failure;
947 net_warn_ratelimited("%s: Local routing loop detected!\n",
949 goto tx_err_dst_release;
951 mtu = dst_mtu(dst) - sizeof (*ipv6h);
952 if (encap_limit >= 0) {
956 if (mtu < IPV6_MIN_MTU)
959 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
960 if (skb->len > mtu) {
963 goto tx_err_dst_release;
967 * Okay, now see if we can stuff it in the buffer as-is.
969 max_headroom += LL_RESERVED_SPACE(tdev);
971 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
972 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
973 struct sk_buff *new_skb;
975 if (!(new_skb = skb_realloc_headroom(skb, max_headroom)))
976 goto tx_err_dst_release;
979 skb_set_owner_w(new_skb, skb->sk);
984 if (fl6->flowi6_mark) {
985 skb_dst_set(skb, dst);
988 skb_dst_set_noref(skb, dst);
990 skb->transport_header = skb->network_header;
992 proto = fl6->flowi6_proto;
993 if (encap_limit >= 0) {
994 init_tel_txopt(&opt, encap_limit);
995 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
997 skb_push(skb, sizeof(struct ipv6hdr));
998 skb_reset_network_header(skb);
999 ipv6h = ipv6_hdr(skb);
1000 *(__be32*)ipv6h = fl6->flowlabel | htonl(0x60000000);
1001 dsfield = INET_ECN_encapsulate(0, dsfield);
1002 ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
1003 ipv6h->hop_limit = t->parms.hop_limit;
1004 ipv6h->nexthdr = proto;
1005 ipv6h->saddr = fl6->saddr;
1006 ipv6h->daddr = fl6->daddr;
1009 err = ip6_local_out(skb);
1011 if (net_xmit_eval(err) == 0) {
1012 struct pcpu_tstats *tstats = this_cpu_ptr(t->dev->tstats);
1014 tstats->tx_bytes += pkt_len;
1015 tstats->tx_packets++;
1018 stats->tx_aborted_errors++;
1021 ip6_tnl_dst_store(t, ndst);
1023 tx_err_link_failure:
1024 stats->tx_carrier_errors++;
1025 dst_link_failure(skb);
1032 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1034 struct ip6_tnl *t = netdev_priv(dev);
1035 const struct iphdr *iph = ip_hdr(skb);
1036 int encap_limit = -1;
1042 if ((t->parms.proto != IPPROTO_IPIP && t->parms.proto != 0) ||
1043 !ip6_tnl_xmit_ctl(t))
1046 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1047 encap_limit = t->parms.encap_limit;
1049 memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6));
1050 fl6.flowi6_proto = IPPROTO_IPIP;
1052 dsfield = ipv4_get_dsfield(iph);
1054 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1055 fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
1057 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1058 fl6.flowi6_mark = skb->mark;
1060 err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
1062 /* XXX: send ICMP error even if DF is not set. */
1063 if (err == -EMSGSIZE)
1064 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
1073 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1075 struct ip6_tnl *t = netdev_priv(dev);
1076 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1077 int encap_limit = -1;
1084 if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
1085 !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
1088 offset = parse_tlv_tnl_enc_lim(skb, skb_network_header(skb));
1090 struct ipv6_tlv_tnl_enc_lim *tel;
1091 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
1092 if (tel->encap_limit == 0) {
1093 icmpv6_send(skb, ICMPV6_PARAMPROB,
1094 ICMPV6_HDR_FIELD, offset + 2);
1097 encap_limit = tel->encap_limit - 1;
1098 } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1099 encap_limit = t->parms.encap_limit;
1101 memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6));
1102 fl6.flowi6_proto = IPPROTO_IPV6;
1104 dsfield = ipv6_get_dsfield(ipv6h);
1105 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1106 fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
1107 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1108 fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
1109 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1110 fl6.flowi6_mark = skb->mark;
1112 err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
1114 if (err == -EMSGSIZE)
1115 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1123 ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1125 struct ip6_tnl *t = netdev_priv(dev);
1126 struct net_device_stats *stats = &t->dev->stats;
1129 switch (skb->protocol) {
1130 case htons(ETH_P_IP):
1131 ret = ip4ip6_tnl_xmit(skb, dev);
1133 case htons(ETH_P_IPV6):
1134 ret = ip6ip6_tnl_xmit(skb, dev);
1143 return NETDEV_TX_OK;
1147 stats->tx_dropped++;
1149 return NETDEV_TX_OK;
1152 static void ip6_tnl_link_config(struct ip6_tnl *t)
1154 struct net_device *dev = t->dev;
1155 struct ip6_tnl_parm *p = &t->parms;
1156 struct flowi6 *fl6 = &t->fl.u.ip6;
1158 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1159 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1161 /* Set up flowi template */
1162 fl6->saddr = p->laddr;
1163 fl6->daddr = p->raddr;
1164 fl6->flowi6_oif = p->link;
1167 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1168 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1169 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1170 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1172 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1173 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1175 if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1176 dev->flags |= IFF_POINTOPOINT;
1178 dev->flags &= ~IFF_POINTOPOINT;
1180 dev->iflink = p->link;
1182 if (p->flags & IP6_TNL_F_CAP_XMIT) {
1183 int strict = (ipv6_addr_type(&p->raddr) &
1184 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1186 struct rt6_info *rt = rt6_lookup(dev_net(dev),
1187 &p->raddr, &p->laddr,
1194 dev->hard_header_len = rt->dst.dev->hard_header_len +
1195 sizeof (struct ipv6hdr);
1197 dev->mtu = rt->dst.dev->mtu - sizeof (struct ipv6hdr);
1198 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1201 if (dev->mtu < IPV6_MIN_MTU)
1202 dev->mtu = IPV6_MIN_MTU;
1204 dst_release(&rt->dst);
1209 * ip6_tnl_change - update the tunnel parameters
1210 * @t: tunnel to be changed
1211 * @p: tunnel configuration parameters
1214 * ip6_tnl_change() updates the tunnel parameters
1218 ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
1220 t->parms.laddr = p->laddr;
1221 t->parms.raddr = p->raddr;
1222 t->parms.flags = p->flags;
1223 t->parms.hop_limit = p->hop_limit;
1224 t->parms.encap_limit = p->encap_limit;
1225 t->parms.flowinfo = p->flowinfo;
1226 t->parms.link = p->link;
1227 t->parms.proto = p->proto;
1228 ip6_tnl_dst_reset(t);
1229 ip6_tnl_link_config(t);
1234 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
1235 * @dev: virtual device associated with tunnel
1236 * @ifr: parameters passed from userspace
1237 * @cmd: command to be performed
1240 * ip6_tnl_ioctl() is used for managing IPv6 tunnels
1243 * The possible commands are the following:
1244 * %SIOCGETTUNNEL: get tunnel parameters for device
1245 * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
1246 * %SIOCCHGTUNNEL: change tunnel parameters to those given
1247 * %SIOCDELTUNNEL: delete tunnel
1249 * The fallback device "ip6tnl0", created during module
1250 * initialization, can be used for creating other tunnel devices.
1254 * %-EFAULT if unable to copy data to or from userspace,
1255 * %-EPERM if the current process does not have %CAP_NET_ADMIN set,
1256 * %-EINVAL if passed tunnel parameters are invalid,
1257 * %-EEXIST if changing a tunnel's parameters would cause a conflict
1258 * %-ENODEV if attempting to change or delete a nonexistent device
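/*
 * Illustrative userspace sketch (not from this file): a tunnel is normally
 * added by passing a struct ip6_tnl_parm through the ifreq data pointer on
 * the fallback device. Device names and addresses below are made up, and
 * error handling is omitted.
 *
 *	struct ip6_tnl_parm p = { .proto = IPPROTO_IPV6 };
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	strcpy(p.name, "ip6tnl1");
 *	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
 *	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
 *	strcpy(ifr.ifr_name, "ip6tnl0");
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);
 */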
1262 ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1265 struct ip6_tnl_parm p;
1266 struct ip6_tnl *t = NULL;
1267 struct net *net = dev_net(dev);
1268 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1272 if (dev == ip6n->fb_tnl_dev) {
1273 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) {
1277 t = ip6_tnl_locate(net, &p, 0);
1280 t = netdev_priv(dev);
1281 memcpy(&p, &t->parms, sizeof (p));
1282 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
1289 if (!capable(CAP_NET_ADMIN))
1292 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
1295 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1298 t = ip6_tnl_locate(net, &p, cmd == SIOCADDTUNNEL);
1299 if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
1301 if (t->dev != dev) {
1306 t = netdev_priv(dev);
1308 ip6_tnl_unlink(ip6n, t);
1310 err = ip6_tnl_change(t, &p);
1311 ip6_tnl_link(ip6n, t);
1312 netdev_state_change(dev);
1316 if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof (p)))
1320 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
1324 if (!capable(CAP_NET_ADMIN))
1327 if (dev == ip6n->fb_tnl_dev) {
1329 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
1332 if ((t = ip6_tnl_locate(net, &p, 0)) == NULL)
1335 if (t->dev == ip6n->fb_tnl_dev)
1340 unregister_netdevice(dev);
1349 * ip6_tnl_change_mtu - change mtu manually for tunnel device
1350 * @dev: virtual device associated with tunnel
1351 * @new_mtu: the new mtu
1355 * %-EINVAL if mtu too small
1359 ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1361 if (new_mtu < IPV6_MIN_MTU) {
1369 static const struct net_device_ops ip6_tnl_netdev_ops = {
1370 .ndo_uninit = ip6_tnl_dev_uninit,
1371 .ndo_start_xmit = ip6_tnl_xmit,
1372 .ndo_do_ioctl = ip6_tnl_ioctl,
1373 .ndo_change_mtu = ip6_tnl_change_mtu,
1374 .ndo_get_stats = ip6_get_stats,
1379 * ip6_tnl_dev_setup - setup virtual tunnel device
1380 * @dev: virtual device associated with tunnel
1383 * Initialize function pointers and device parameters
1386 static void ip6_tnl_dev_setup(struct net_device *dev)
1390 dev->netdev_ops = &ip6_tnl_netdev_ops;
1391 dev->destructor = ip6_dev_free;
1393 dev->type = ARPHRD_TUNNEL6;
1394 dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
1395 dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr);
1396 t = netdev_priv(dev);
1397 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1399 dev->flags |= IFF_NOARP;
1400 dev->addr_len = sizeof(struct in6_addr);
1401 dev->features |= NETIF_F_NETNS_LOCAL;
1402 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1407 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1408 * @dev: virtual device associated with tunnel
1412 ip6_tnl_dev_init_gen(struct net_device *dev)
1414 struct ip6_tnl *t = netdev_priv(dev);
1417 dev->tstats = alloc_percpu(struct pcpu_tstats);
1424 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
1425 * @dev: virtual device associated with tunnel
1428 static int ip6_tnl_dev_init(struct net_device *dev)
1430 struct ip6_tnl *t = netdev_priv(dev);
1431 int err = ip6_tnl_dev_init_gen(dev);
1435 ip6_tnl_link_config(t);
1440 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1441 * @dev: fallback device
1446 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1448 struct ip6_tnl *t = netdev_priv(dev);
1449 struct net *net = dev_net(dev);
1450 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1451 int err = ip6_tnl_dev_init_gen(dev);
1456 t->parms.proto = IPPROTO_IPV6;
1459 ip6_tnl_link_config(t);
1461 rcu_assign_pointer(ip6n->tnls_wc[0], t);
1465 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
1466 .handler = ip4ip6_rcv,
1467 .err_handler = ip4ip6_err,
1471 static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
1472 .handler = ip6ip6_rcv,
1473 .err_handler = ip6ip6_err,
1477 static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
1483 for (h = 0; h < HASH_SIZE; h++) {
1484 t = rtnl_dereference(ip6n->tnls_r_l[h]);
1486 unregister_netdevice_queue(t->dev, &list);
1487 t = rtnl_dereference(t->next);
1491 t = rtnl_dereference(ip6n->tnls_wc[0]);
1492 unregister_netdevice_queue(t->dev, &list);
1493 unregister_netdevice_many(&list);
1496 static int __net_init ip6_tnl_init_net(struct net *net)
1498 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1499 struct ip6_tnl *t = NULL;
1502 ip6n->tnls[0] = ip6n->tnls_wc;
1503 ip6n->tnls[1] = ip6n->tnls_r_l;
1506 ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
1509 if (!ip6n->fb_tnl_dev)
1511 dev_net_set(ip6n->fb_tnl_dev, net);
1513 err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
1517 err = register_netdev(ip6n->fb_tnl_dev);
1521 t = netdev_priv(ip6n->fb_tnl_dev);
1523 strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
1527 ip6_dev_free(ip6n->fb_tnl_dev);
1532 static void __net_exit ip6_tnl_exit_net(struct net *net)
1534 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1537 ip6_tnl_destroy_tunnels(ip6n);
1541 static struct pernet_operations ip6_tnl_net_ops = {
1542 .init = ip6_tnl_init_net,
1543 .exit = ip6_tnl_exit_net,
1544 .id = &ip6_tnl_net_id,
1545 .size = sizeof(struct ip6_tnl_net),
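/*
 * Because .id and .size are set, register_pernet_device() allocates a
 * zeroed struct ip6_tnl_net for every network namespace; the
 * net_generic(net, ip6_tnl_net_id) calls above return that per-namespace
 * state.
 */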
1549 * ip6_tunnel_init - register protocol and reserve needed resources
1551 * Return: 0 on success
1554 static int __init ip6_tunnel_init(void)
1558 err = register_pernet_device(&ip6_tnl_net_ops);
1562 err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
1564 pr_err("%s: can't register ip4ip6\n", __func__);
1568 err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
1570 pr_err("%s: can't register ip6ip6\n", __func__);
1577 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
1579 unregister_pernet_device(&ip6_tnl_net_ops);
1585 * ip6_tunnel_cleanup - free resources and unregister protocol
1588 static void __exit ip6_tunnel_cleanup(void)
1590 if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
1591 pr_info("%s: can't deregister ip4ip6\n", __func__);
1593 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
1594 pr_info("%s: can't deregister ip6ip6\n", __func__);
1596 unregister_pernet_device(&ip6_tnl_net_ops);
1599 module_init(ip6_tunnel_init);
1600 module_exit(ip6_tunnel_cleanup);