/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>

/*
   Problems & solutions:

   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   with infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. It is a percpu
   counter; this works because CPU migration is forbidden once we have
   entered the first ndo_xmit(). We force an exit when this counter
   reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but they would really
   kill the network. The IP hop limit plays the role of "t->recursion" in
   this case, if we copy it from the packet being encapsulated to the
   upper header. It is a very good solution, but it introduces two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output
     would even be more informative. This idea turned out to be wrong:
     only Linux complies with RFC 1812 now (yes, guys, Linux is the only
     true router now :-)); all other routers (at least, in my neighbourhood)
     return only 8 bytes of payload. That is the end of it.

   Hence, if we want OSPF to work, or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect inner encapsulation
   made by our own node. That is difficult or even impossible, especially
   taking fragmentation into account. To be short: ttl is no solution at all.

   Current solution: the solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit;
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed the PMTU are pruned) and the tunnel MTU
   rapidly degrades to a value < 68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the packets being encapsulated
   have DF set. But it is not our problem! Nobody could accuse us;
   we did all that we could. Even if it was your gated that injected
   the fatal route into the network, even if it was you who configured
   the fatal static route: you are innocent. :-)

   Alexey Kuznetsov.
 */
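
/* Illustrative sketch only, not part of this file's control flow: the
 * DF-forcing described above shows up as the netlink default further down.
 * Unless the user disables path MTU discovery, ipgre_netlink_parms() does,
 * in essence,
 *
 *	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
 *		parms->iph.frag_off = htons(IP_DF);
 *
 * so each pass of a looping packet adds header bytes against a fixed outer
 * PMTU, and the effective tunnel MTU shrinks until the loop dies.
 */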

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;

static void ipgre_err(struct sk_buff *skb, u32 info,
		      const struct tnl_ptk_info *tpi)
{
	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco's "wise men" put the GRE key into the third word
	   of the GRE header. That makes it impossible to maintain even soft
	   state for keyed GRE tunnels with enabled checksums. Tell them
	   "thank you".

	   Well, I wonder, RFC 1812 was written by a Cisco employee --
	   why the hell do these idiots break standards established
	   by the same Cisco?
	 */
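
	/* For reference (not from the original comment): the GRE header per
	 * RFC 2784, with the RFC 2890 key/sequence extensions.  The optional
	 * checksum occupies the second 32-bit word, which is why the key
	 * lands in the third word once checksums are enabled:
	 *
	 *	|C| |K|S| Reserved0 |Ver|      Protocol Type      |
	 *	|  Checksum (optional)  |  Reserved1 (optional)   |
	 *	|                 Key (optional)                  |
	 *	|           Sequence Number (optional)            |
	 */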

	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;
	struct ip_tunnel *t;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   RFC 2003 contains "deep thoughts" about NET_UNREACH;
			   I believe they are just ether pollution. --ANK
			 */
			return;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
		break;

	case ICMP_REDIRECT:
		break;
	}

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return;

#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6) &&
	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
					type, data_len))
		return;
#endif

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}

static void gre_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco's "wise men" put the GRE key into the third word
	 * of the GRE header. That makes it impossible to maintain even soft
	 * state for keyed GRE tunnels with enabled checksums. Tell them
	 * "thank you".
	 *
	 * Well, I wonder, RFC 1812 was written by a Cisco employee --
	 * why the hell do these idiots break standards established
	 * by the same Cisco?
	 */

	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;
	bool csum_err = false;

	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
			     iph->ihl * 4) < 0) {
		if (!csum_err)		/* ignore csum errors. */
			return;
	}

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
			      IPPROTO_GRE, 0);
		return;
	}

	ipgre_err(skb, info, &tpi);
}

static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		if (tunnel->dev->type != ARPHRD_NONE)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);
		if (tunnel->collect_md) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}
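
/* Note (added for clarity): gre_rcv() below is the entry point registered
 * with the GRE demultiplexer as the GREPROTO_CISCO handler (see ipgre_init()).
 * It parses the GRE header and hands the packet to ipgre_rcv(), which tries
 * the gretap table first for ETH_P_TEB and falls back to the plain gre table.
 */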

static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
			 htonl(tunnel->o_seqno));

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

static struct rtable *gre_get_rt(struct sk_buff *skb,
				 struct net_device *dev,
				 struct flowi4 *fl,
				 const struct ip_tunnel_key *key)
{
	struct net *net = dev_net(dev);

	memset(fl, 0, sizeof(*fl));
	fl->daddr = key->u.ipv4.dst;
	fl->saddr = key->u.ipv4.src;
	fl->flowi4_tos = RT_TOS(key->tos);
	fl->flowi4_mark = skb->mark;
	fl->flowi4_proto = IPPROTO_GRE;

	return ip_route_output_key(net, fl);
}
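
/* Note (added for clarity): gre_fb_xmit() is the flow-based ("collect_md")
 * transmit path.  The outer addresses, key, tos/ttl and flags come from the
 * per-packet tunnel metadata attached to the skb (e.g. by openvswitch),
 * not from the device's own parameters.
 */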
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	int err, min_headroom, tunnel_hlen;
	struct flowi4 fl;
	__be16 df, flags;
	bool use_cache;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr);
	if (!rt) {
		rt = gre_get_rt(skb, dev, &fl, key);
		if (IS_ERR(rt))
			goto err_free_skb;
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl.saddr);
	}

	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb));

		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_rt;

	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id), 0);

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}

static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	rt = gre_get_rt(skb, dev, &fl4, &info->key);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}

static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to gre header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static int ipgre_tunnel_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	int err;
	struct ip_tunnel_parm p;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;
	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle had been invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could make something similar,
   but this feature is apparently missing from IOS <= 11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to the mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, the mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}
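
/* Note (added for clarity): per the header_ops convention, ipgre_header()
 * returns the pushed header length on success, and the negated length when
 * the header could not be completed (no destination address yet); in either
 * case the header space has already been consumed with skb_push().
 */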

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
};

#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ipgre_netdev_ops;
	dev->type = ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}

static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	int t_hlen;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct iphdr);

	dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
	dev->mtu = ETH_DATA_LEN - t_hlen - 4;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags = IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len = 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
	}

	return ip_tunnel_init(dev);
}

static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);

	ip_tunnel_delete_net(itn, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION | GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data);
}

static int ipgre_netlink_parms(struct net_device *dev,
			       struct nlattr *data[],
			       struct nlattr *tb[],
			       struct ip_tunnel_parm *parms,
			       __u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
		    && (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	return 0;
}
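
/* For illustration (not from the original source): these attributes map
 * onto the iproute2 options, e.g.
 *
 *	ip link add gre1 type gre local 10.0.0.1 remote 10.0.0.2 \
 *		ttl 64 key 7 [no]pmtudisc
 *
 * where "key 7" sets both IFLA_GRE_IKEY and IFLA_GRE_OKEY.
 */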

/* This function returns true when ENCAP attributes are present in the netlink message */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops	= &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}

static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = 0;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);

		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = t->fwmark;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_changelink(dev, tb, &p, fwmark);
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		nla_total_size(4) +	/* IFLA_GRE_LINK */
		nla_total_size(2) +	/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +	/* IFLA_GRE_OFLAGS */
		nla_total_size(4) +	/* IFLA_GRE_IKEY */
		nla_total_size(4) +	/* IFLA_GRE_OKEY */
		nla_total_size(4) +	/* IFLA_GRE_LOCAL */
		nla_total_size(4) +	/* IFLA_GRE_REMOTE */
		nla_total_size(1) +	/* IFLA_GRE_TTL */
		nla_total_size(1) +	/* IFLA_GRE_TOS */
		nla_total_size(1) +	/* IFLA_GRE_PMTUDISC */
		nla_total_size(2) +	/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +	/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +	/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +	/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(0) +	/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(1) +	/* IFLA_GRE_IGNORE_DF */
		nla_total_size(4) +	/* IFLA_GRE_FWMARK */
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
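
/* Note (added for clarity): gretap_fb_dev_create() is the in-kernel hook for
 * users such as openvswitch that need an L2 (gretap) device already in
 * collect_md mode, bypassing the usual netlink attribute parsing.
 */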
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);

static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_net(struct net *net)
{
	struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);

	ip_tunnel_delete_net(itn, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit = ipgre_tap_exit_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	return 0;

tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");