2 * IPv6 tunneling device
3 * Linux INET6 implementation
10 * linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23 #include <linux/module.h>
24 #include <linux/capability.h>
25 #include <linux/errno.h>
26 #include <linux/types.h>
27 #include <linux/sockios.h>
28 #include <linux/icmp.h>
32 #include <linux/net.h>
33 #include <linux/in6.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/icmpv6.h>
37 #include <linux/init.h>
38 #include <linux/route.h>
39 #include <linux/rtnetlink.h>
40 #include <linux/netfilter_ipv6.h>
41 #include <linux/slab.h>
42 #include <linux/hash.h>
43 #include <linux/etherdevice.h>
45 #include <linux/uaccess.h>
46 #include <linux/atomic.h>
50 #include <net/ip_tunnels.h>
52 #include <net/ip6_route.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_tunnel.h>
56 #include <net/dsfield.h>
57 #include <net/inet_ecn.h>
58 #include <net/net_namespace.h>
59 #include <net/netns/generic.h>
60 #include <net/dst_metadata.h>
62 MODULE_AUTHOR("Ville Nuorvala");
63 MODULE_DESCRIPTION("IPv6 tunneling device");
64 MODULE_LICENSE("GPL");
65 MODULE_ALIAS_RTNL_LINK("ip6tnl");
66 MODULE_ALIAS_NETDEV("ip6tnl0");
68 #define IP6_TUNNEL_HASH_SIZE_SHIFT 5
69 #define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)
71 static bool log_ecn_error = true;
72 module_param(log_ecn_error, bool, 0644);
73 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
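/*
 * Tunnels are kept in IP6_TUNNEL_HASH_SIZE (32) hash buckets keyed on both
 * endpoint addresses: ipv6_addr_hash() xor-folds each 128-bit address down
 * to 32 bits, the two results are xor-ed together and hash_32() reduces
 * that to an IP6_TUNNEL_HASH_SIZE_SHIFT-bit bucket index.
 */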
75 static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
77 u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
79 return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
82 static int ip6_tnl_dev_init(struct net_device *dev);
83 static void ip6_tnl_dev_setup(struct net_device *dev);
84 static struct rtnl_link_ops ip6_link_ops __read_mostly;
86 static unsigned int ip6_tnl_net_id __read_mostly;
88 /* the IPv6 tunnel fallback device */
89 struct net_device *fb_tnl_dev;
90 /* lists for storing tunnels in use */
91 struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
92 struct ip6_tnl __rcu *tnls_wc[1];
93 struct ip6_tnl __rcu **tnls[2];
94 struct ip6_tnl __rcu *collect_md_tun;
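/*
 * ip6_get_stats() folds the per-cpu software counters in dev->tstats into
 * dev->stats; each per-cpu entry is read under its u64_stats seqcount so a
 * consistent snapshot is summed even on 32-bit hosts.
 */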
97 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
99 struct pcpu_sw_netstats tmp, sum = { 0 };
102 for_each_possible_cpu(i) {
104 const struct pcpu_sw_netstats *tstats =
105 per_cpu_ptr(dev->tstats, i);
108 start = u64_stats_fetch_begin_irq(&tstats->syncp);
109 tmp.rx_packets = tstats->rx_packets;
110 tmp.rx_bytes = tstats->rx_bytes;
111 tmp.tx_packets = tstats->tx_packets;
112 tmp.tx_bytes = tstats->tx_bytes;
113 } while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
115 sum.rx_packets += tmp.rx_packets;
116 sum.rx_bytes += tmp.rx_bytes;
117 sum.tx_packets += tmp.tx_packets;
118 sum.tx_bytes += tmp.tx_bytes;
120 dev->stats.rx_packets = sum.rx_packets;
121 dev->stats.rx_bytes = sum.rx_bytes;
122 dev->stats.tx_packets = sum.tx_packets;
123 dev->stats.tx_bytes = sum.tx_bytes;
128 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
129 * @remote: the address of the tunnel exit-point
130 * @local: the address of the tunnel entry-point
133 * tunnel matching given end-points if found,
134 * else fallback tunnel if its device is up,
138 #define for_each_ip6_tunnel_rcu(start) \
139 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
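/*
 * Lookup order: an exact {remote, local} match wins, then a tunnel bound
 * only to the local address, then one bound only to the remote address,
 * then a collect_md (external mode) tunnel, and finally the fallback
 * device, in each case only if the device is up.
 */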
141 static struct ip6_tnl *
142 ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
144 unsigned int hash = HASH(remote, local);
146 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
149 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
150 if (ipv6_addr_equal(local, &t->parms.laddr) &&
151 ipv6_addr_equal(remote, &t->parms.raddr) &&
152 (t->dev->flags & IFF_UP))
156 memset(&any, 0, sizeof(any));
157 hash = HASH(&any, local);
158 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
159 if (ipv6_addr_equal(local, &t->parms.laddr) &&
160 ipv6_addr_any(&t->parms.raddr) &&
161 (t->dev->flags & IFF_UP))
165 hash = HASH(remote, &any);
166 for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
167 if (ipv6_addr_equal(remote, &t->parms.raddr) &&
168 ipv6_addr_any(&t->parms.laddr) &&
169 (t->dev->flags & IFF_UP))
173 t = rcu_dereference(ip6n->collect_md_tun);
174 if (t && t->dev->flags & IFF_UP)
177 t = rcu_dereference(ip6n->tnls_wc[0]);
178 if (t && (t->dev->flags & IFF_UP))
185 * ip6_tnl_bucket - get head of list matching given tunnel parameters
186 * @p: parameters containing tunnel end-points
189 * ip6_tnl_bucket() returns the head of the list matching the
190 * &struct in6_addr entries laddr and raddr in @p.
192 * Return: head of IPv6 tunnel list
195 static struct ip6_tnl __rcu **
196 ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
198 const struct in6_addr *remote = &p->raddr;
199 const struct in6_addr *local = &p->laddr;
203 if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
205 h = HASH(remote, local);
207 return &ip6n->tnls[prio][h];
211 * ip6_tnl_link - add tunnel to hash table
212 * @t: tunnel to be added
216 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
218 struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
220 if (t->parms.collect_md)
221 rcu_assign_pointer(ip6n->collect_md_tun, t);
222 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
223 rcu_assign_pointer(*tp, t);
227 * ip6_tnl_unlink - remove tunnel from hash table
228 * @t: tunnel to be removed
232 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
234 struct ip6_tnl __rcu **tp;
235 struct ip6_tnl *iter;
237 if (t->parms.collect_md)
238 rcu_assign_pointer(ip6n->collect_md_tun, NULL);
240 for (tp = ip6_tnl_bucket(ip6n, &t->parms);
241 (iter = rtnl_dereference(*tp)) != NULL;
244 rcu_assign_pointer(*tp, t->next);
250 static void ip6_dev_free(struct net_device *dev)
252 struct ip6_tnl *t = netdev_priv(dev);
254 gro_cells_destroy(&t->gro_cells);
255 dst_cache_destroy(&t->dst_cache);
256 free_percpu(dev->tstats);
259 static int ip6_tnl_create2(struct net_device *dev)
261 struct ip6_tnl *t = netdev_priv(dev);
262 struct net *net = dev_net(dev);
263 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
266 t = netdev_priv(dev);
268 dev->rtnl_link_ops = &ip6_link_ops;
269 err = register_netdevice(dev);
273 strcpy(t->parms.name, dev->name);
276 ip6_tnl_link(ip6n, t);
284 * ip6_tnl_create - create a new tunnel
285 * @p: tunnel parameters
286 * @pt: pointer to new tunnel
289 * Create tunnel matching given parameters.
292 * created tunnel or error pointer
295 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
297 struct net_device *dev;
303 strlcpy(name, p->name, IFNAMSIZ);
305 sprintf(name, "ip6tnl%%d");
307 dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
312 dev_net_set(dev, net);
314 t = netdev_priv(dev);
316 t->net = dev_net(dev);
317 err = ip6_tnl_create2(dev);
330 * ip6_tnl_locate - find or create tunnel matching given parameters
331 * @p: tunnel parameters
332 * @create: != 0 if a new tunnel may be created when no match is found
335 * ip6_tnl_locate() first tries to locate an existing tunnel
336 * based on @p. If this is unsuccessful, but @create is set, a new
337 * tunnel device is created and registered for use.
340 * matching tunnel or error pointer
343 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
344 struct __ip6_tnl_parm *p, int create)
346 const struct in6_addr *remote = &p->raddr;
347 const struct in6_addr *local = &p->laddr;
348 struct ip6_tnl __rcu **tp;
350 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
352 for (tp = ip6_tnl_bucket(ip6n, p);
353 (t = rtnl_dereference(*tp)) != NULL;
355 if (ipv6_addr_equal(local, &t->parms.laddr) &&
356 ipv6_addr_equal(remote, &t->parms.raddr)) {
358 return ERR_PTR(-EEXIST);
364 return ERR_PTR(-ENODEV);
365 return ip6_tnl_create(net, p);
369 * ip6_tnl_dev_uninit - tunnel device uninitializer
370 * @dev: the device to be destroyed
373 * ip6_tnl_dev_uninit() removes tunnel from its list
377 ip6_tnl_dev_uninit(struct net_device *dev)
379 struct ip6_tnl *t = netdev_priv(dev);
380 struct net *net = t->net;
381 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
383 if (dev == ip6n->fb_tnl_dev)
384 RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
386 ip6_tnl_unlink(ip6n, t);
387 dst_cache_reset(&t->dst_cache);
392 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
393 * @skb: received socket buffer
396 * 0 if none was found,
397 * else index to encapsulation limit
400 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
402 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
403 unsigned int nhoff = raw - skb->data;
404 unsigned int off = nhoff + sizeof(*ipv6h);
405 u8 next, nexthdr = ipv6h->nexthdr;
407 while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
408 struct ipv6_opt_hdr *hdr;
411 if (!pskb_may_pull(skb, off + sizeof(*hdr)))
414 hdr = (struct ipv6_opt_hdr *)(skb->data + off);
415 if (nexthdr == NEXTHDR_FRAGMENT) {
416 struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
417 if (frag_hdr->frag_off)
420 } else if (nexthdr == NEXTHDR_AUTH) {
421 optlen = (hdr->hdrlen + 2) << 2;
423 optlen = ipv6_optlen(hdr);
425 /* cache hdr->nexthdr, since pskb_may_pull() might
429 if (nexthdr == NEXTHDR_DEST) {
432 /* Remember: hdr is no longer valid at this point. */
433 if (!pskb_may_pull(skb, off + optlen))
437 struct ipv6_tlv_tnl_enc_lim *tel;
439 /* No more room for encapsulation limit */
440 if (i + sizeof(*tel) > optlen)
443 tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
444 /* return index of option if found and valid */
445 if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
447 return i + off - nhoff;
448 /* else jump to next option */
450 i += tel->length + 2;
460 EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
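/*
 * Callers must treat previously cached header pointers as stale after this
 * call: the pskb_may_pull() calls above may reallocate skb->head (see the
 * reload of ipv6_hdr() in ip6ip6_tnl_xmit()).  A return value of 0 means no
 * encapsulation limit option was found.
 */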
463 * ip6_tnl_err - tunnel error handler
466 * ip6_tnl_err() should handle errors in the tunnel according
467 * to the specifications in RFC 2473.
471 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
472 u8 *type, u8 *code, int *msg, __u32 *info, int offset)
474 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
477 u8 rel_type = ICMPV6_DEST_UNREACH;
478 u8 rel_code = ICMPV6_ADDR_UNREACH;
484 /* If the packet doesn't contain the original IPv6 header we are
485 in trouble since we might need the source address for further
486 processing of the error. */
489 t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
493 tproto = ACCESS_ONCE(t->parms.proto);
494 if (tproto != ipproto && tproto != 0)
501 struct ipv6_tlv_tnl_enc_lim *tel;
503 case ICMPV6_DEST_UNREACH:
504 net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
508 case ICMPV6_TIME_EXCEED:
509 if ((*code) == ICMPV6_EXC_HOPLIMIT) {
510 net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
515 case ICMPV6_PARAMPROB:
517 if ((*code) == ICMPV6_HDR_FIELD)
518 teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
520 if (teli && teli == *info - 2) {
521 tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
522 if (tel->encap_limit == 0) {
523 net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
528 net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
532 case ICMPV6_PKT_TOOBIG:
533 mtu = *info - offset;
534 if (mtu < IPV6_MIN_MTU)
538 len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
540 rel_type = ICMPV6_PKT_TOOBIG;
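/*
 * ip4ip6_err() and ip6ip6_err() below use the verdict from ip6_tnl_err() to
 * relay the problem to the original sender: ICMPv6 errors seen on the outer
 * IPv6 header are translated into ICMP(v4) or ICMPv6 messages for the
 * encapsulated packet, following RFC 2473.
 */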
559 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
560 u8 type, u8 code, int offset, __be32 info)
565 __u32 rel_info = ntohl(info);
567 struct sk_buff *skb2;
568 const struct iphdr *eiph;
572 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
573 &rel_msg, &rel_info, offset);
581 case ICMPV6_DEST_UNREACH:
582 if (rel_code != ICMPV6_ADDR_UNREACH)
584 rel_type = ICMP_DEST_UNREACH;
585 rel_code = ICMP_HOST_UNREACH;
587 case ICMPV6_PKT_TOOBIG:
590 rel_type = ICMP_DEST_UNREACH;
591 rel_code = ICMP_FRAG_NEEDED;
594 rel_type = ICMP_REDIRECT;
595 rel_code = ICMP_REDIR_HOST;
600 if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
603 skb2 = skb_clone(skb, GFP_ATOMIC);
609 skb_pull(skb2, offset);
610 skb_reset_network_header(skb2);
613 /* Try to guess incoming interface */
614 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
617 IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
621 skb2->dev = rt->dst.dev;
623 /* route "incoming" packet */
624 if (rt->rt_flags & RTCF_LOCAL) {
627 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
628 eiph->daddr, eiph->saddr,
631 RT_TOS(eiph->tos), 0);
633 rt->dst.dev->type != ARPHRD_TUNNEL) {
638 skb_dst_set(skb2, &rt->dst);
641 if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
643 skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
647 /* change mtu on this route */
648 if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
649 if (rel_info > dst_mtu(skb_dst(skb2)))
652 skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
654 if (rel_type == ICMP_REDIRECT)
655 skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);
657 icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
665 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
666 u8 type, u8 code, int offset, __be32 info)
671 __u32 rel_info = ntohl(info);
674 err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
675 &rel_msg, &rel_info, offset);
679 if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
681 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
687 skb_pull(skb2, offset);
688 skb_reset_network_header(skb2);
690 /* Try to guess incoming interface */
691 rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
694 if (rt && rt->dst.dev)
695 skb2->dev = rt->dst.dev;
697 icmpv6_send(skb2, rel_type, rel_code, rel_info);
707 static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
708 const struct ipv6hdr *ipv6h,
711 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
713 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
714 ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
716 return IP6_ECN_decapsulate(ipv6h, skb);
719 static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
720 const struct ipv6hdr *ipv6h,
723 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
724 ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
726 return IP6_ECN_decapsulate(ipv6h, skb);
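/*
 * ip6_tnl_get_cap() derives what the configured endpoints allow: a wildcard
 * endpoint yields IP6_TNL_F_CAP_PER_PACKET (the decision is deferred to
 * ip6_tnl_rcv_ctl()/ip6_tnl_xmit_ctl() per packet); otherwise, with no
 * loopback addresses and link-local addresses only when a link is bound,
 * IP6_TNL_F_CAP_XMIT requires a unicast local address and
 * IP6_TNL_F_CAP_RCV a unicast remote address.
 */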
729 __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
730 const struct in6_addr *laddr,
731 const struct in6_addr *raddr)
733 struct __ip6_tnl_parm *p = &t->parms;
734 int ltype = ipv6_addr_type(laddr);
735 int rtype = ipv6_addr_type(raddr);
738 if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
739 flags = IP6_TNL_F_CAP_PER_PACKET;
740 } else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
741 rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
742 !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
743 (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
744 if (ltype&IPV6_ADDR_UNICAST)
745 flags |= IP6_TNL_F_CAP_XMIT;
746 if (rtype&IPV6_ADDR_UNICAST)
747 flags |= IP6_TNL_F_CAP_RCV;
751 EXPORT_SYMBOL(ip6_tnl_get_cap);
753 /* called with rcu_read_lock() */
754 int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
755 const struct in6_addr *laddr,
756 const struct in6_addr *raddr)
758 struct __ip6_tnl_parm *p = &t->parms;
760 struct net *net = t->net;
762 if ((p->flags & IP6_TNL_F_CAP_RCV) ||
763 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
764 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
765 struct net_device *ldev = NULL;
768 ldev = dev_get_by_index_rcu(net, p->link);
770 if ((ipv6_addr_is_multicast(laddr) ||
771 likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
772 likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
777 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
779 static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
780 const struct tnl_ptk_info *tpi,
781 struct metadata_dst *tun_dst,
782 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
783 const struct ipv6hdr *ipv6h,
784 struct sk_buff *skb),
787 struct pcpu_sw_netstats *tstats;
788 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
791 if ((!(tpi->flags & TUNNEL_CSUM) &&
792 (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
793 ((tpi->flags & TUNNEL_CSUM) &&
794 !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
795 tunnel->dev->stats.rx_crc_errors++;
796 tunnel->dev->stats.rx_errors++;
800 if (tunnel->parms.i_flags & TUNNEL_SEQ) {
801 if (!(tpi->flags & TUNNEL_SEQ) ||
803 (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
804 tunnel->dev->stats.rx_fifo_errors++;
805 tunnel->dev->stats.rx_errors++;
808 tunnel->i_seqno = ntohl(tpi->seq) + 1;
811 skb->protocol = tpi->proto;
813 /* Warning: All skb pointers will be invalidated! */
814 if (tunnel->dev->type == ARPHRD_ETHER) {
815 if (!pskb_may_pull(skb, ETH_HLEN)) {
816 tunnel->dev->stats.rx_length_errors++;
817 tunnel->dev->stats.rx_errors++;
821 ipv6h = ipv6_hdr(skb);
822 skb->protocol = eth_type_trans(skb, tunnel->dev);
823 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
825 skb->dev = tunnel->dev;
828 skb_reset_network_header(skb);
829 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
831 __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
833 err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
836 net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
838 ipv6_get_dsfield(ipv6h));
840 ++tunnel->dev->stats.rx_frame_errors;
841 ++tunnel->dev->stats.rx_errors;
846 tstats = this_cpu_ptr(tunnel->dev->tstats);
847 u64_stats_update_begin(&tstats->syncp);
848 tstats->rx_packets++;
849 tstats->rx_bytes += skb->len;
850 u64_stats_update_end(&tstats->syncp);
852 skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
855 skb_dst_set(skb, (struct dst_entry *)tun_dst);
857 gro_cells_receive(&tunnel->gro_cells, skb);
862 dst_release((struct dst_entry *)tun_dst);
867 int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
868 const struct tnl_ptk_info *tpi,
869 struct metadata_dst *tun_dst,
872 return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate,
875 EXPORT_SYMBOL(ip6_tnl_rcv);
877 static const struct tnl_ptk_info tpi_v6 = {
878 /* no tunnel info required for ipxip6. */
879 .proto = htons(ETH_P_IPV6),
882 static const struct tnl_ptk_info tpi_v4 = {
883 /* no tunnel info required for ipxip6. */
884 .proto = htons(ETH_P_IP),
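/*
 * ipxip6_rcv() below does the per-packet admission work: the tunnel is
 * looked up by the outer addresses, the configured protocol and xfrm policy
 * are checked, ip6_tnl_rcv_ctl() validates the endpoints, the outer header
 * is pulled and, for collect_md tunnels, a metadata dst is attached before
 * the packet is handed to __ip6_tnl_rcv().
 */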
887 static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
888 const struct tnl_ptk_info *tpi,
889 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
890 const struct ipv6hdr *ipv6h,
891 struct sk_buff *skb))
894 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
895 struct metadata_dst *tun_dst = NULL;
899 t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
902 u8 tproto = ACCESS_ONCE(t->parms.proto);
904 if (tproto != ipproto && tproto != 0)
906 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
908 if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
910 if (iptunnel_pull_header(skb, 0, tpi->proto, false))
912 if (t->parms.collect_md) {
913 tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
917 ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
931 static int ip4ip6_rcv(struct sk_buff *skb)
933 return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
934 ip4ip6_dscp_ecn_decapsulate);
937 static int ip6ip6_rcv(struct sk_buff *skb)
939 return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
940 ip6ip6_dscp_ecn_decapsulate);
943 struct ipv6_tel_txoption {
944 struct ipv6_txoptions ops;
948 static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
950 memset(opt, 0, sizeof(struct ipv6_tel_txoption));
952 opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
954 opt->dst_opt[4] = encap_limit;
955 opt->dst_opt[5] = IPV6_TLV_PADN;
958 opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt;
959 opt->ops.opt_nflen = 8;
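/*
 * The resulting 8-byte Destination Options header carries the RFC 2473
 * Tunnel Encapsulation Limit option (type 0x04, length 1) followed by a
 * PadN option, roughly:
 *
 *   [nexthdr][hdrlen=0][0x04][0x01][limit][PadN=0x01][0x01][0x00]
 *
 * the next-header byte being filled in when the options are pushed.
 */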
963 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
964 * @t: the outgoing tunnel device
965 * @hdr: IPv6 header from the incoming packet
968 * Avoid trivial tunneling loop by checking that tunnel exit-point
969 * doesn't match source of incoming packet.
977 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
979 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
982 int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
983 const struct in6_addr *laddr,
984 const struct in6_addr *raddr)
986 struct __ip6_tnl_parm *p = &t->parms;
988 struct net *net = t->net;
990 if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
991 ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
992 (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
993 struct net_device *ldev = NULL;
997 ldev = dev_get_by_index_rcu(net, p->link);
999 if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0)))
1000 pr_warn("%s xmit: Local address not yet configured!\n",
1002 else if (!ipv6_addr_is_multicast(raddr) &&
1003 unlikely(ipv6_chk_addr(net, raddr, NULL, 0)))
1004 pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
1012 EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
1015 * ip6_tnl_xmit - encapsulate packet and send
1016 * @skb: the outgoing socket buffer
1017 * @dev: the outgoing tunnel device
1018 * @dsfield: dscp code for outer header
1019 * @fl6: flow of tunneled packet
1020 * @encap_limit: encapsulation limit
1021 * @pmtu: Path MTU is stored here if the packet is too big
1022 * @proto: next header value
1025 * Build new header and do some sanity checks on the packet before sending
1031 * %-EMSGSIZE if the message is too big; the path MTU is returned via @pmtu in this case.
1034 int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1035 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
1038 struct ip6_tnl *t = netdev_priv(dev);
1039 struct net *net = t->net;
1040 struct net_device_stats *stats = &t->dev->stats;
1041 struct ipv6hdr *ipv6h;
1042 struct ipv6_tel_txoption opt;
1043 struct dst_entry *dst = NULL, *ndst = NULL;
1044 struct net_device *tdev;
1046 unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
1047 unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
1048 unsigned int max_headroom = psh_hlen;
1049 bool use_cache = false;
1053 if (t->parms.collect_md) {
1054 hop_limit = skb_tunnel_info(skb)->key.ttl;
1057 hop_limit = t->parms.hop_limit;
1061 if (ipv6_addr_any(&t->parms.raddr)) {
1062 if (skb->protocol == htons(ETH_P_IPV6)) {
1063 struct in6_addr *addr6;
1064 struct neighbour *neigh;
1068 goto tx_err_link_failure;
1070 neigh = dst_neigh_lookup(skb_dst(skb),
1071 &ipv6_hdr(skb)->daddr);
1073 goto tx_err_link_failure;
1075 addr6 = (struct in6_addr *)&neigh->primary_key;
1076 addr_type = ipv6_addr_type(addr6);
1078 if (addr_type == IPV6_ADDR_ANY)
1079 addr6 = &ipv6_hdr(skb)->daddr;
1081 memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1082 neigh_release(neigh);
1084 } else if (!(t->parms.flags &
1085 (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
1086 /* enable the cache only if the routing decision does
1087 * not depend on the current inner header value
1093 dst = dst_cache_get(&t->dst_cache);
1095 if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
1096 goto tx_err_link_failure;
1100 /* add dsfield to flowlabel for route lookup */
1101 fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
1103 dst = ip6_route_output(net, NULL, fl6);
1106 goto tx_err_link_failure;
1107 dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
1111 goto tx_err_link_failure;
1113 if (t->parms.collect_md &&
1114 ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
1115 &fl6->daddr, 0, &fl6->saddr))
1116 goto tx_err_link_failure;
1123 stats->collisions++;
1124 net_warn_ratelimited("%s: Local routing loop detected!\n",
1126 goto tx_err_dst_release;
1128 mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
1129 if (encap_limit >= 0) {
1133 if (mtu < IPV6_MIN_MTU)
1135 if (skb_dst(skb) && !t->parms.collect_md)
1136 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
1137 if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
1140 goto tx_err_dst_release;
1143 if (t->err_count > 0) {
1144 if (time_before(jiffies,
1145 t->err_time + IP6TUNNEL_ERR_TIMEO)) {
1148 dst_link_failure(skb);
1154 skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
1157 * Okay, now see if we can stuff it in the buffer as-is.
1159 max_headroom += LL_RESERVED_SPACE(tdev);
1161 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
1162 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
1163 struct sk_buff *new_skb;
1165 new_skb = skb_realloc_headroom(skb, max_headroom);
1167 goto tx_err_dst_release;
1170 skb_set_owner_w(new_skb, skb->sk);
1175 if (t->parms.collect_md) {
1176 if (t->encap.type != TUNNEL_ENCAP_NONE)
1177 goto tx_err_dst_release;
1179 if (use_cache && ndst)
1180 dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
1182 skb_dst_set(skb, dst);
1184 if (encap_limit >= 0) {
1185 init_tel_txopt(&opt, encap_limit);
1186 ipv6_push_frag_opts(skb, &opt.ops, &proto);
1188 hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);
1190 /* Calculate max headroom for all the headers and adjust
1191 * needed_headroom if necessary.
1193 max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
1194 + dst->header_len + t->hlen;
1195 if (max_headroom > dev->needed_headroom)
1196 dev->needed_headroom = max_headroom;
1198 err = ip6_tnl_encap(skb, t, &proto, fl6);
1202 skb_push(skb, sizeof(struct ipv6hdr));
1203 skb_reset_network_header(skb);
1204 ipv6h = ipv6_hdr(skb);
1205 ip6_flow_hdr(ipv6h, dsfield,
1206 ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
1207 ipv6h->hop_limit = hop_limit;
1208 ipv6h->nexthdr = proto;
1209 ipv6h->saddr = fl6->saddr;
1210 ipv6h->daddr = fl6->daddr;
1211 ip6tunnel_xmit(NULL, skb, dev);
1213 tx_err_link_failure:
1214 stats->tx_carrier_errors++;
1215 dst_link_failure(skb);
1220 EXPORT_SYMBOL(ip6_tnl_xmit);
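/*
 * The ip4ip6_tnl_xmit()/ip6ip6_tnl_xmit() wrappers below fill in the flow
 * (fl6), the outer traffic class and the encapsulation limit before calling
 * ip6_tnl_xmit(): with collect_md the values come from the per-skb tunnel
 * metadata, otherwise from t->parms, optionally inheriting DSCP, flow label
 * and fwmark from the inner packet when the IP6_TNL_F_USE_ORIG_* flags are
 * set.
 */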
1223 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1225 struct ip6_tnl *t = netdev_priv(dev);
1226 const struct iphdr *iph = ip_hdr(skb);
1227 int encap_limit = -1;
1234 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1236 tproto = ACCESS_ONCE(t->parms.proto);
1237 if (tproto != IPPROTO_IPIP && tproto != 0)
1240 if (t->parms.collect_md) {
1241 struct ip_tunnel_info *tun_info;
1242 const struct ip_tunnel_key *key;
1244 tun_info = skb_tunnel_info(skb);
1245 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1246 ip_tunnel_info_af(tun_info) != AF_INET6))
1248 key = &tun_info->key;
1249 memset(&fl6, 0, sizeof(fl6));
1250 fl6.flowi6_proto = IPPROTO_IPIP;
1251 fl6.daddr = key->u.ipv6.dst;
1252 fl6.flowlabel = key->label;
1255 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1256 encap_limit = t->parms.encap_limit;
1258 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1259 fl6.flowi6_proto = IPPROTO_IPIP;
1261 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1262 dsfield = ipv4_get_dsfield(iph);
1264 dsfield = ip6_tclass(t->parms.flowinfo);
1265 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1266 fl6.flowi6_mark = skb->mark;
1268 fl6.flowi6_mark = t->parms.fwmark;
1271 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1273 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1276 dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
1278 skb_set_inner_ipproto(skb, IPPROTO_IPIP);
1280 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1283 /* XXX: send ICMP error even if DF is not set. */
1284 if (err == -EMSGSIZE)
1285 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
1294 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1296 struct ip6_tnl *t = netdev_priv(dev);
1297 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
1298 int encap_limit = -1;
1306 tproto = ACCESS_ONCE(t->parms.proto);
1307 if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
1308 ip6_tnl_addr_conflict(t, ipv6h))
1311 if (t->parms.collect_md) {
1312 struct ip_tunnel_info *tun_info;
1313 const struct ip_tunnel_key *key;
1315 tun_info = skb_tunnel_info(skb);
1316 if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1317 ip_tunnel_info_af(tun_info) != AF_INET6))
1319 key = &tun_info->key;
1320 memset(&fl6, 0, sizeof(fl6));
1321 fl6.flowi6_proto = IPPROTO_IPV6;
1322 fl6.daddr = key->u.ipv6.dst;
1323 fl6.flowlabel = key->label;
1326 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1327 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
1328 ipv6h = ipv6_hdr(skb);
1330 struct ipv6_tlv_tnl_enc_lim *tel;
1332 tel = (void *)&skb_network_header(skb)[offset];
1333 if (tel->encap_limit == 0) {
1334 icmpv6_send(skb, ICMPV6_PARAMPROB,
1335 ICMPV6_HDR_FIELD, offset + 2);
1338 encap_limit = tel->encap_limit - 1;
1339 } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
1340 encap_limit = t->parms.encap_limit;
1343 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1344 fl6.flowi6_proto = IPPROTO_IPV6;
1346 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1347 dsfield = ipv6_get_dsfield(ipv6h);
1349 dsfield = ip6_tclass(t->parms.flowinfo);
1350 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1351 fl6.flowlabel |= ip6_flowlabel(ipv6h);
1352 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1353 fl6.flowi6_mark = skb->mark;
1355 fl6.flowi6_mark = t->parms.fwmark;
1358 fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1360 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1363 dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1365 skb_set_inner_ipproto(skb, IPPROTO_IPV6);
1367 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1370 if (err == -EMSGSIZE)
1371 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1379 ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
1381 struct ip6_tnl *t = netdev_priv(dev);
1382 struct net_device_stats *stats = &t->dev->stats;
1385 switch (skb->protocol) {
1386 case htons(ETH_P_IP):
1387 ret = ip4ip6_tnl_xmit(skb, dev);
1389 case htons(ETH_P_IPV6):
1390 ret = ip6ip6_tnl_xmit(skb, dev);
1399 return NETDEV_TX_OK;
1403 stats->tx_dropped++;
1405 return NETDEV_TX_OK;
1408 static void ip6_tnl_link_config(struct ip6_tnl *t)
1410 struct net_device *dev = t->dev;
1411 struct __ip6_tnl_parm *p = &t->parms;
1412 struct flowi6 *fl6 = &t->fl.u.ip6;
1415 memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1416 memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1418 /* Set up flowi template */
1419 fl6->saddr = p->laddr;
1420 fl6->daddr = p->raddr;
1421 fl6->flowi6_oif = p->link;
1424 if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1425 fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1426 if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1427 fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1429 p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1430 p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1432 if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1433 dev->flags |= IFF_POINTOPOINT;
1435 dev->flags &= ~IFF_POINTOPOINT;
1438 t->hlen = t->encap_hlen + t->tun_hlen;
1439 t_hlen = t->hlen + sizeof(struct ipv6hdr);
1441 if (p->flags & IP6_TNL_F_CAP_XMIT) {
1442 int strict = (ipv6_addr_type(&p->raddr) &
1443 (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1445 struct rt6_info *rt = rt6_lookup(t->net,
1446 &p->raddr, &p->laddr,
1453 dev->hard_header_len = rt->dst.dev->hard_header_len +
1456 dev->mtu = rt->dst.dev->mtu - t_hlen;
1457 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1460 if (dev->mtu < IPV6_MIN_MTU)
1461 dev->mtu = IPV6_MIN_MTU;
1468 * ip6_tnl_change - update the tunnel parameters
1469 * @t: tunnel to be changed
1470 * @p: tunnel configuration parameters
1473 * ip6_tnl_change() updates the tunnel parameters
1477 ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
1479 t->parms.laddr = p->laddr;
1480 t->parms.raddr = p->raddr;
1481 t->parms.flags = p->flags;
1482 t->parms.hop_limit = p->hop_limit;
1483 t->parms.encap_limit = p->encap_limit;
1484 t->parms.flowinfo = p->flowinfo;
1485 t->parms.link = p->link;
1486 t->parms.proto = p->proto;
1487 t->parms.fwmark = p->fwmark;
1488 dst_cache_reset(&t->dst_cache);
1489 ip6_tnl_link_config(t);
1493 static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1495 struct net *net = t->net;
1496 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1499 ip6_tnl_unlink(ip6n, t);
1501 err = ip6_tnl_change(t, p);
1502 ip6_tnl_link(ip6n, t);
1503 netdev_state_change(t->dev);
1507 static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1509 /* for the default tnl0 device, only the proto may be changed */
1510 t->parms.proto = p->proto;
1511 netdev_state_change(t->dev);
1516 ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
1518 p->laddr = u->laddr;
1519 p->raddr = u->raddr;
1520 p->flags = u->flags;
1521 p->hop_limit = u->hop_limit;
1522 p->encap_limit = u->encap_limit;
1523 p->flowinfo = u->flowinfo;
1525 p->proto = u->proto;
1526 memcpy(p->name, u->name, sizeof(u->name));
1530 ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
1532 u->laddr = p->laddr;
1533 u->raddr = p->raddr;
1534 u->flags = p->flags;
1535 u->hop_limit = p->hop_limit;
1536 u->encap_limit = p->encap_limit;
1537 u->flowinfo = p->flowinfo;
1539 u->proto = p->proto;
1540 memcpy(u->name, p->name, sizeof(u->name));
1544 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
1545 * @dev: virtual device associated with tunnel
1546 * @ifr: parameters passed from userspace
1547 * @cmd: command to be performed
1550 * ip6_tnl_ioctl() is used for managing IPv6 tunnels
1553 * The possible commands are the following:
1554 * %SIOCGETTUNNEL: get tunnel parameters for device
1555 * %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
1556 * %SIOCCHGTUNNEL: change tunnel parameters to those given
1557 * %SIOCDELTUNNEL: delete tunnel
1559 * The fallback device "ip6tnl0", created during module
1560 * initialization, can be used for creating other tunnel devices.
1564 * %-EFAULT if unable to copy data to or from userspace,
1565 * %-EPERM if the current process does not have %CAP_NET_ADMIN set
1566 * %-EINVAL if passed tunnel parameters are invalid,
1567 * %-EEXIST if changing a tunnel's parameters would cause a conflict
1568 * %-ENODEV if attempting to change or delete a nonexistent device
1572 ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1575 struct ip6_tnl_parm p;
1576 struct __ip6_tnl_parm p1;
1577 struct ip6_tnl *t = netdev_priv(dev);
1578 struct net *net = t->net;
1579 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1581 memset(&p1, 0, sizeof(p1));
1585 if (dev == ip6n->fb_tnl_dev) {
1586 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1590 ip6_tnl_parm_from_user(&p1, &p);
1591 t = ip6_tnl_locate(net, &p1, 0);
1593 t = netdev_priv(dev);
1595 memset(&p, 0, sizeof(p));
1597 ip6_tnl_parm_to_user(&p, &t->parms);
1598 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
1605 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1608 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1611 if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1614 ip6_tnl_parm_from_user(&p1, &p);
1615 t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
1616 if (cmd == SIOCCHGTUNNEL) {
1618 if (t->dev != dev) {
1623 t = netdev_priv(dev);
1624 if (dev == ip6n->fb_tnl_dev)
1625 err = ip6_tnl0_update(t, &p1);
1627 err = ip6_tnl_update(t, &p1);
1631 ip6_tnl_parm_to_user(&p, &t->parms);
1632 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1641 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1644 if (dev == ip6n->fb_tnl_dev) {
1646 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1649 ip6_tnl_parm_from_user(&p1, &p);
1650 t = ip6_tnl_locate(net, &p1, 0);
1654 if (t->dev == ip6n->fb_tnl_dev)
1659 unregister_netdevice(dev);
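/*
 * Userspace usage sketch (illustrative only, not part of this file): the
 * ioctl interface above is driven through an ifreq whose ifru_data points
 * at a struct ip6_tnl_parm from <linux/ip6_tunnel.h>.  Creating an ip6ip6
 * tunnel via the fallback device might look roughly as follows
 * (CAP_NET_ADMIN is required; addresses and error handling are minimal):
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <linux/if_tunnel.h>
 *	#include <linux/ip6_tunnel.h>
 *
 *	static int add_ip6ip6_tunnel(const char *name,
 *				     const char *local, const char *remote)
 *	{
 *		struct ip6_tnl_parm p = { .proto = IPPROTO_IPV6, .hop_limit = 64 };
 *		struct ifreq ifr;
 *		int fd, err;
 *
 *		strncpy(p.name, name, IFNAMSIZ - 1);
 *		inet_pton(AF_INET6, local, &p.laddr);
 *		inet_pton(AF_INET6, remote, &p.raddr);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "ip6tnl0", IFNAMSIZ - 1);
 *		ifr.ifr_ifru.ifru_data = (void *)&p;
 *
 *		fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *		if (fd < 0)
 *			return -1;
 *		err = ioctl(fd, SIOCADDTUNNEL, &ifr);
 *		close(fd);
 *		return err;
 *	}
 *
 * SIOCCHGTUNNEL and SIOCDELTUNNEL are used the same way on the tunnel
 * device itself.
 */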
1668 * ip6_tnl_change_mtu - change mtu manually for tunnel device
1669 * @dev: virtual device associated with tunnel
1670 * @new_mtu: the new mtu
1674 * %-EINVAL if mtu too small
1677 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1679 struct ip6_tnl *tnl = netdev_priv(dev);
1681 if (tnl->parms.proto == IPPROTO_IPIP) {
1682 if (new_mtu < ETH_MIN_MTU)
1685 if (new_mtu < IPV6_MIN_MTU)
1688 if (new_mtu > 0xFFF8 - dev->hard_header_len)
1693 EXPORT_SYMBOL(ip6_tnl_change_mtu);
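/*
 * Note: the lower MTU bound depends on the tunnel mode (ETH_MIN_MTU when
 * the tunnel carries IPv4, IPV6_MIN_MTU otherwise); the upper bound is
 * 0xFFF8 minus the link-layer header length, matching dev->max_mtu as set
 * in ip6_tnl_dev_init_gen().
 */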
1695 int ip6_tnl_get_iflink(const struct net_device *dev)
1697 struct ip6_tnl *t = netdev_priv(dev);
1699 return t->parms.link;
1701 EXPORT_SYMBOL(ip6_tnl_get_iflink);
1703 int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
1706 if (num >= MAX_IPTUN_ENCAP_OPS)
1709 return !cmpxchg((const struct ip6_tnl_encap_ops **)
1710 &ip6tun_encaps[num],
1711 NULL, ops) ? 0 : -1;
1713 EXPORT_SYMBOL(ip6_tnl_encap_add_ops);
1715 int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
1720 if (num >= MAX_IPTUN_ENCAP_OPS)
1723 ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
1724 &ip6tun_encaps[num],
1725 ops, NULL) == ops) ? 0 : -1;
1731 EXPORT_SYMBOL(ip6_tnl_encap_del_ops);
1733 int ip6_tnl_encap_setup(struct ip6_tnl *t,
1734 struct ip_tunnel_encap *ipencap)
1738 memset(&t->encap, 0, sizeof(t->encap));
1740 hlen = ip6_encap_hlen(ipencap);
1744 t->encap.type = ipencap->type;
1745 t->encap.sport = ipencap->sport;
1746 t->encap.dport = ipencap->dport;
1747 t->encap.flags = ipencap->flags;
1749 t->encap_hlen = hlen;
1750 t->hlen = t->encap_hlen + t->tun_hlen;
1754 EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
1756 static const struct net_device_ops ip6_tnl_netdev_ops = {
1757 .ndo_init = ip6_tnl_dev_init,
1758 .ndo_uninit = ip6_tnl_dev_uninit,
1759 .ndo_start_xmit = ip6_tnl_start_xmit,
1760 .ndo_do_ioctl = ip6_tnl_ioctl,
1761 .ndo_change_mtu = ip6_tnl_change_mtu,
1762 .ndo_get_stats = ip6_get_stats,
1763 .ndo_get_iflink = ip6_tnl_get_iflink,
1766 #define IPXIPX_FEATURES (NETIF_F_SG | \
1767 NETIF_F_FRAGLIST | \
1769 NETIF_F_GSO_SOFTWARE | \
1773 * ip6_tnl_dev_setup - setup virtual tunnel device
1774 * @dev: virtual device associated with tunnel
1777 * Initialize function pointers and device parameters
1780 static void ip6_tnl_dev_setup(struct net_device *dev)
1782 dev->netdev_ops = &ip6_tnl_netdev_ops;
1783 dev->needs_free_netdev = true;
1784 dev->priv_destructor = ip6_dev_free;
1786 dev->type = ARPHRD_TUNNEL6;
1787 dev->flags |= IFF_NOARP;
1788 dev->addr_len = sizeof(struct in6_addr);
1789 dev->features |= NETIF_F_LLTX;
1790 netif_keep_dst(dev);
1792 dev->features |= IPXIPX_FEATURES;
1793 dev->hw_features |= IPXIPX_FEATURES;
1795 /* This perm addr will be used as interface identifier by IPv6 */
1796 dev->addr_assign_type = NET_ADDR_RANDOM;
1797 eth_random_addr(dev->perm_addr);
1802 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1803 * @dev: virtual device associated with tunnel
1807 ip6_tnl_dev_init_gen(struct net_device *dev)
1809 struct ip6_tnl *t = netdev_priv(dev);
1814 t->net = dev_net(dev);
1815 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1819 ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
1823 ret = gro_cells_init(&t->gro_cells, dev);
1828 t->hlen = t->encap_hlen + t->tun_hlen;
1829 t_hlen = t->hlen + sizeof(struct ipv6hdr);
1831 dev->type = ARPHRD_TUNNEL6;
1832 dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1833 dev->mtu = ETH_DATA_LEN - t_hlen;
1834 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1836 dev->min_mtu = ETH_MIN_MTU;
1837 dev->max_mtu = 0xFFF8 - dev->hard_header_len;
1842 dst_cache_destroy(&t->dst_cache);
1844 free_percpu(dev->tstats);
1851 * ip6_tnl_dev_init - initializer for all non-fallback tunnel devices
1852 * @dev: virtual device associated with tunnel
1855 static int ip6_tnl_dev_init(struct net_device *dev)
1857 struct ip6_tnl *t = netdev_priv(dev);
1858 int err = ip6_tnl_dev_init_gen(dev);
1862 ip6_tnl_link_config(t);
1863 if (t->parms.collect_md) {
1864 dev->features |= NETIF_F_NETNS_LOCAL;
1865 netif_keep_dst(dev);
1871 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1872 * @dev: fallback device
1877 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1879 struct ip6_tnl *t = netdev_priv(dev);
1880 struct net *net = dev_net(dev);
1881 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1883 t->parms.proto = IPPROTO_IPV6;
1886 rcu_assign_pointer(ip6n->tnls_wc[0], t);
1890 static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[],
1891 struct netlink_ext_ack *extack)
1895 if (!data || !data[IFLA_IPTUN_PROTO])
1898 proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1899 if (proto != IPPROTO_IPV6 &&
1900 proto != IPPROTO_IPIP &&
1907 static void ip6_tnl_netlink_parms(struct nlattr *data[],
1908 struct __ip6_tnl_parm *parms)
1910 memset(parms, 0, sizeof(*parms));
1915 if (data[IFLA_IPTUN_LINK])
1916 parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
1918 if (data[IFLA_IPTUN_LOCAL])
1919 parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);
1921 if (data[IFLA_IPTUN_REMOTE])
1922 parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);
1924 if (data[IFLA_IPTUN_TTL])
1925 parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
1927 if (data[IFLA_IPTUN_ENCAP_LIMIT])
1928 parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);
1930 if (data[IFLA_IPTUN_FLOWINFO])
1931 parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);
1933 if (data[IFLA_IPTUN_FLAGS])
1934 parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);
1936 if (data[IFLA_IPTUN_PROTO])
1937 parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1939 if (data[IFLA_IPTUN_COLLECT_METADATA])
1940 parms->collect_md = true;
1942 if (data[IFLA_IPTUN_FWMARK])
1943 parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
1946 static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
1947 struct ip_tunnel_encap *ipencap)
1951 memset(ipencap, 0, sizeof(*ipencap));
1956 if (data[IFLA_IPTUN_ENCAP_TYPE]) {
1958 ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
1961 if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
1963 ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
1966 if (data[IFLA_IPTUN_ENCAP_SPORT]) {
1968 ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
1971 if (data[IFLA_IPTUN_ENCAP_DPORT]) {
1973 ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
1979 static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
1980 struct nlattr *tb[], struct nlattr *data[],
1981 struct netlink_ext_ack *extack)
1983 struct net *net = dev_net(dev);
1984 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1985 struct ip6_tnl *nt, *t;
1986 struct ip_tunnel_encap ipencap;
1988 nt = netdev_priv(dev);
1990 if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
1991 int err = ip6_tnl_encap_setup(nt, &ipencap);
1997 ip6_tnl_netlink_parms(data, &nt->parms);
1999 if (nt->parms.collect_md) {
2000 if (rtnl_dereference(ip6n->collect_md_tun))
2003 t = ip6_tnl_locate(net, &nt->parms, 0);
2008 return ip6_tnl_create2(dev);
2011 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
2012 struct nlattr *data[],
2013 struct netlink_ext_ack *extack)
2015 struct ip6_tnl *t = netdev_priv(dev);
2016 struct __ip6_tnl_parm p;
2017 struct net *net = t->net;
2018 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2019 struct ip_tunnel_encap ipencap;
2021 if (dev == ip6n->fb_tnl_dev)
2024 if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
2025 int err = ip6_tnl_encap_setup(t, &ipencap);
2030 ip6_tnl_netlink_parms(data, &p);
2034 t = ip6_tnl_locate(net, &p, 0);
2039 t = netdev_priv(dev);
2041 return ip6_tnl_update(t, &p);
2044 static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
2046 struct net *net = dev_net(dev);
2047 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2049 if (dev != ip6n->fb_tnl_dev)
2050 unregister_netdevice_queue(dev, head);
2053 static size_t ip6_tnl_get_size(const struct net_device *dev)
2056 /* IFLA_IPTUN_LINK */
2058 /* IFLA_IPTUN_LOCAL */
2059 nla_total_size(sizeof(struct in6_addr)) +
2060 /* IFLA_IPTUN_REMOTE */
2061 nla_total_size(sizeof(struct in6_addr)) +
2062 /* IFLA_IPTUN_TTL */
2064 /* IFLA_IPTUN_ENCAP_LIMIT */
2066 /* IFLA_IPTUN_FLOWINFO */
2068 /* IFLA_IPTUN_FLAGS */
2070 /* IFLA_IPTUN_PROTO */
2072 /* IFLA_IPTUN_ENCAP_TYPE */
2074 /* IFLA_IPTUN_ENCAP_FLAGS */
2076 /* IFLA_IPTUN_ENCAP_SPORT */
2078 /* IFLA_IPTUN_ENCAP_DPORT */
2080 /* IFLA_IPTUN_COLLECT_METADATA */
2082 /* IFLA_IPTUN_FWMARK */
2087 static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
2089 struct ip6_tnl *tunnel = netdev_priv(dev);
2090 struct __ip6_tnl_parm *parm = &tunnel->parms;
2092 if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
2093 nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
2094 nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
2095 nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
2096 nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
2097 nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
2098 nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
2099 nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
2100 nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
2101 goto nla_put_failure;
2103 if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
2104 nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
2105 nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
2106 nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
2107 goto nla_put_failure;
2109 if (parm->collect_md)
2110 if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
2111 goto nla_put_failure;
2119 struct net *ip6_tnl_get_link_net(const struct net_device *dev)
2121 struct ip6_tnl *tunnel = netdev_priv(dev);
2125 EXPORT_SYMBOL(ip6_tnl_get_link_net);
2127 static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
2128 [IFLA_IPTUN_LINK] = { .type = NLA_U32 },
2129 [IFLA_IPTUN_LOCAL] = { .len = sizeof(struct in6_addr) },
2130 [IFLA_IPTUN_REMOTE] = { .len = sizeof(struct in6_addr) },
2131 [IFLA_IPTUN_TTL] = { .type = NLA_U8 },
2132 [IFLA_IPTUN_ENCAP_LIMIT] = { .type = NLA_U8 },
2133 [IFLA_IPTUN_FLOWINFO] = { .type = NLA_U32 },
2134 [IFLA_IPTUN_FLAGS] = { .type = NLA_U32 },
2135 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
2136 [IFLA_IPTUN_ENCAP_TYPE] = { .type = NLA_U16 },
2137 [IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 },
2138 [IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
2139 [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
2140 [IFLA_IPTUN_COLLECT_METADATA] = { .type = NLA_FLAG },
2141 [IFLA_IPTUN_FWMARK] = { .type = NLA_U32 },
2144 static struct rtnl_link_ops ip6_link_ops __read_mostly = {
2146 .maxtype = IFLA_IPTUN_MAX,
2147 .policy = ip6_tnl_policy,
2148 .priv_size = sizeof(struct ip6_tnl),
2149 .setup = ip6_tnl_dev_setup,
2150 .validate = ip6_tnl_validate,
2151 .newlink = ip6_tnl_newlink,
2152 .changelink = ip6_tnl_changelink,
2153 .dellink = ip6_tnl_dellink,
2154 .get_size = ip6_tnl_get_size,
2155 .fill_info = ip6_tnl_fill_info,
2156 .get_link_net = ip6_tnl_get_link_net,
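/*
 * These link ops back the "ip6tnl" rtnetlink kind, so tunnels can also be
 * managed without the ioctl interface; with iproute2 this is roughly:
 *
 *	ip link add name tun1 type ip6tnl mode ip6ip6 \
 *		local 2001:db8::1 remote 2001:db8::2 ttl 64
 *
 * (command form and addresses are illustrative).
 */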
2159 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
2160 .handler = ip4ip6_rcv,
2161 .err_handler = ip4ip6_err,
2165 static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
2166 .handler = ip6ip6_rcv,
2167 .err_handler = ip6ip6_err,
2171 static void __net_exit ip6_tnl_destroy_tunnels(struct net *net)
2173 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2174 struct net_device *dev, *aux;
2179 for_each_netdev_safe(net, dev, aux)
2180 if (dev->rtnl_link_ops == &ip6_link_ops)
2181 unregister_netdevice_queue(dev, &list);
2183 for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
2184 t = rtnl_dereference(ip6n->tnls_r_l[h]);
2186 /* If dev is in the same netns, it has already
2187 * been added to the list by the previous loop.
2189 if (!net_eq(dev_net(t->dev), net))
2190 unregister_netdevice_queue(t->dev, &list);
2191 t = rtnl_dereference(t->next);
2195 unregister_netdevice_many(&list);
2198 static int __net_init ip6_tnl_init_net(struct net *net)
2200 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2201 struct ip6_tnl *t = NULL;
2204 ip6n->tnls[0] = ip6n->tnls_wc;
2205 ip6n->tnls[1] = ip6n->tnls_r_l;
2208 ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
2209 NET_NAME_UNKNOWN, ip6_tnl_dev_setup);
2211 if (!ip6n->fb_tnl_dev)
2213 dev_net_set(ip6n->fb_tnl_dev, net);
2214 ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
2215 /* FB netdevice is special: we have one, and only one per netns.
2216 * Moving it to another netns is clearly unsafe.
2218 ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
2220 err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
2224 err = register_netdev(ip6n->fb_tnl_dev);
2228 t = netdev_priv(ip6n->fb_tnl_dev);
2230 strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
2234 free_netdev(ip6n->fb_tnl_dev);
2239 static void __net_exit ip6_tnl_exit_net(struct net *net)
2242 ip6_tnl_destroy_tunnels(net);
2246 static struct pernet_operations ip6_tnl_net_ops = {
2247 .init = ip6_tnl_init_net,
2248 .exit = ip6_tnl_exit_net,
2249 .id = &ip6_tnl_net_id,
2250 .size = sizeof(struct ip6_tnl_net),
2254 * ip6_tunnel_init - register protocol and reserve needed resources
2256 * Return: 0 on success
2259 static int __init ip6_tunnel_init(void)
2263 if (!ipv6_mod_enabled())
2266 err = register_pernet_device(&ip6_tnl_net_ops);
2270 err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
2272 pr_err("%s: can't register ip4ip6\n", __func__);
2276 err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
2278 pr_err("%s: can't register ip6ip6\n", __func__);
2281 err = rtnl_link_register(&ip6_link_ops);
2283 goto rtnl_link_failed;
2288 xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
2290 xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
2292 unregister_pernet_device(&ip6_tnl_net_ops);
2298 * ip6_tunnel_cleanup - free resources and unregister protocol
2301 static void __exit ip6_tunnel_cleanup(void)
2303 rtnl_link_unregister(&ip6_link_ops);
2304 if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
2305 pr_info("%s: can't deregister ip4ip6\n", __func__);
2307 if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
2308 pr_info("%s: can't deregister ip6ip6\n", __func__);
2310 unregister_pernet_device(&ip6_tnl_net_ops);
2313 module_init(ip6_tunnel_init);
2314 module_exit(ip6_tunnel_cleanup);