/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>
/*
   Problems & solutions:

   1. The most important issue is detecting local dead loops.
   They would cause a complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter; since cpu migration is forbidden once we enter the first
   ndo_xmit(), the counter is reliable. We force an exit if this counter
   reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but would really
   kill the network. The IP hop limit plays the role of "t->recursion"
   in this case, if we copy it from the packet being encapsulated to
   the upper header. It is a very good solution, but it introduces
   two problems:

   - Routing protocols using packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output would be
     even more informative. This idea appeared to be wrong: only Linux
     complies with rfc1812 now (yes, guys, Linux is the only true router
     now :-)); all routers (at least, in my neighbourhood) return only
     8 bytes of payload. It is the end.

   Hence, if we want OSPF to work or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect inner
   encapsulation made by our node. It is difficult or even impossible,
   especially taking fragmentation into account. To be short, ttl is
   not a solution at all.

   Current solution: the solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed the pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value < 68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when encapsulating packets have DF set.
   But it is not our problem! Nobody could accuse us; we did
   all that we could. Even if it was your gated that injected the
   fatal route into the network, even if it was you who configured the
   fatal static route: you are innocent. :-)
*/
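
/*
 * A minimal sketch of the xmit-recursion guard described above, assuming
 * hypothetical names (my_recursion_level, MY_RECURSION_LIMIT,
 * my_guarded_xmit); the real kernel keeps an equivalent percpu counter
 * around ndo_start_xmit(). When the nesting depth hits the limit, the
 * packet is dropped instead of recursing further:
 *
 *	static DEFINE_PER_CPU(unsigned int, my_recursion_level);
 *	#define MY_RECURSION_LIMIT	8
 *
 *	static netdev_tx_t my_guarded_xmit(struct sk_buff *skb,
 *					   struct net_device *dev)
 *	{
 *		netdev_tx_t ret;
 *
 *		if (__this_cpu_read(my_recursion_level) >= MY_RECURSION_LIMIT) {
 *			kfree_skb(skb);
 *			return NETDEV_TX_OK;
 *		}
 *		__this_cpu_inc(my_recursion_level);
 *		ret = dev->netdev_ops->ndo_start_xmit(skb, dev);
 *		__this_cpu_dec(my_recursion_level);
 *		return ret;
 *	}
 */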
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
				u32 id, u32 index,
				bool truncate, bool is_ipv4);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;
static void ipgre_err(struct sk_buff *skb, u32 info,
		      const struct tnl_ptk_info *tpi)
	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco "wise men" put the GRE key in the third word
	   of the GRE header. It makes it impossible to maintain even soft
	   state for keyed GRE tunnels with enabled checksum. Tell
	   them "thank you".

	   Well, I wonder: rfc1812 was written by a Cisco employee, so
	   what the hell are these idiots doing breaking the standards
	   they established themselves???
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;

	case ICMP_PARAMETERPROB:

	case ICMP_DEST_UNREACH:
	case ICMP_PORT_UNREACH:
		/* Impossible event. */
		/* All others are translated to HOST_UNREACH.
		   rfc2003 contains "deep thoughts" about NET_UNREACH;
		   I believe they are just ether pollution. --ANK
		 */

	case ICMP_TIME_EXCEEDED:
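		/* RFC 4884 4.1: for extended ICMP messages, the second of
		 * the four formerly-unused header octets carries the length
		 * of the original datagram, counted in 32-bit words; hence
		 * the "* 4" conversion to bytes below.
		 */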
		if (code != ICMP_EXC_TTL)
			return;
		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
		 tpi->proto == htons(ETH_P_ERSPAN2))
		itn = net_generic(net, erspan_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6) &&
	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
					type, data_len))
		return;
#endif

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
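
/*
 * For reference (RFC 2890), the keyed GRE header layout the comment above
 * complains about: with the checksum bit set, the key lands in the third
 * 32-bit word, which is beyond the 8 bytes of payload that most routers
 * quote back in ICMP errors.
 *
 *	word 0:	|C| |K|S|  Reserved0  | Ver |    Protocol Type    |
 *	word 1:	|   Checksum (optional)     |      Reserved1      |
 *	word 2:	|                  Key (optional)                 |
 *	word 3:	|            Sequence Number (optional)           |
 */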
static void gre_err(struct sk_buff *skb, u32 info)
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key in the third word
	 * of the GRE header. It makes it impossible to maintain even soft
	 * state for keyed GRE tunnels with enabled checksum. Tell
	 * them "thank you".
	 *
	 * Well, I wonder: rfc1812 was written by a Cisco employee, so
	 * what the hell are these idiots doing breaking the standards
	 * they established themselves???
	 */
	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;
	bool csum_err = false;

	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
			     iph->ihl * 4) < 0) {
		if (!csum_err)		/* ignore csum errors. */
			return;
	}

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
		return;
	}

	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
			      IPPROTO_GRE, 0);
		return;
	}

	ipgre_err(skb, info, &tpi);
static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	struct erspan_metadata *pkt_md;
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;

	itn = net_generic(net, erspan_net_id);
	len = gre_hdr_len + sizeof(*ershdr);

	/* Check the base hdr len */
	if (unlikely(!pskb_may_pull(skb, len)))
		return PACKET_REJECT;

	iph = ip_hdr(skb);
	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
	ver = ershdr->ver;

	/* The original GRE header does not have a key field;
	 * use the ERSPAN 10-bit session ID as the key.
	 */
	tpi->key = cpu_to_be32(get_session_id(ershdr));
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
				  tpi->flags | TUNNEL_KEY,
				  iph->saddr, iph->daddr, tpi->key);

	len = gre_hdr_len + erspan_hdr_len(ver);
	if (unlikely(!pskb_may_pull(skb, len)))
		return PACKET_REJECT;

	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
	pkt_md = (struct erspan_metadata *)(ershdr + 1);

	if (__iptunnel_pull_header(skb, len, htons(ETH_P_TEB),
				   false, false) < 0)
		goto drop;

	if (tunnel->collect_md) {
		struct ip_tunnel_info *info;
		struct erspan_metadata *md;

		tpi->flags |= TUNNEL_KEY;
		tun_id = key32_to_tunnel_id(tpi->key);

		tun_dst = ip_tun_rx_dst(skb, flags,
					tun_id, sizeof(*md));
		if (!tun_dst)
			return PACKET_REJECT;

		md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
		memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
				    ERSPAN_V2_MDSIZE);

		info = &tun_dst->u.tun_info;
		info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
		info->options_len = sizeof(*md);
	}

	skb_reset_mac_header(skb);
	ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
	return PACKET_RCVD;

drop:
	kfree_skb(skb);
	return PACKET_REJECT;
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
				   raw_proto, false) < 0)
		goto drop;

	if (tunnel->dev->type != ARPHRD_NONE)
		skb_pop_mac_header(skb);
	else
		skb_reset_mac_header(skb);
	if (tunnel->collect_md) {

		flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
		tun_id = key32_to_tunnel_id(tpi->key);
		tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
		if (!tun_dst)
			return PACKET_REJECT;
	}

	ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
	return PACKET_RCVD;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;

static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
static int gre_rcv(struct sk_buff *skb)
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

out:
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
			 htonl(tunnel->o_seqno));

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
	return iptunnel_handle_offloads(skb,
					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
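
/*
 * For orientation: the GRE header is 4 bytes, plus 4 more for each of the
 * CSUM, KEY and SEQ flags. A minimal sketch of that calculation (the real
 * helper is gre_calc_hlen() from the GRE demux code; example_gre_hlen is
 * a hypothetical name used only for illustration):
 *
 *	static int example_gre_hlen(__be16 o_flags)
 *	{
 *		int addend = 4;
 *
 *		if (o_flags & TUNNEL_CSUM)
 *			addend += 4;
 *		if (o_flags & TUNNEL_KEY)
 *			addend += 4;
 *		if (o_flags & TUNNEL_SEQ)
 *			addend += 4;
 *		return addend;
 *	}
 */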
static struct rtable *gre_get_rt(struct sk_buff *skb,
				 struct net_device *dev,
				 struct flowi4 *fl,
				 const struct ip_tunnel_key *key)
	struct net *net = dev_net(dev);

	memset(fl, 0, sizeof(*fl));
	fl->daddr = key->u.ipv4.dst;
	fl->saddr = key->u.ipv4.src;
	fl->flowi4_tos = RT_TOS(key->tos);
	fl->flowi4_mark = skb->mark;
	fl->flowi4_proto = IPPROTO_GRE;

	return ip_route_output_key(net, fl);
static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct flowi4 *fl,
				      int tunnel_hlen)
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;

	tun_info = skb_tunnel_info(skb);
	key = &tun_info->key;
	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);

	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
	if (!rt) {
		rt = gre_get_rt(skb, dev, fl, key);
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl->saddr);
	}

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
			+ tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb));

		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
	return rt;

err_free_rt:
	ip_rt_put(rt);
	dev->stats.tx_dropped++;
	return NULL;
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
	if (!rt)
		goto err_free_skb;

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_rt;

	flags = tun_info->key.tun_flags &
		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id),
			 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			   __be16 proto)
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct erspan_metadata *md;
	struct rtable *rt = NULL;
	bool truncate = false;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
		goto err_free_rt;
	md = ip_tunnel_info_opts(tun_info);
	if (!md)
		goto err_free_rt;

	/* ERSPAN has a fixed 8-byte GRE header */
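	/* (4 bytes of base header plus the 4-byte sequence number field;
	 * ERSPAN always transmits with the GRE S bit set, which is why
	 * gre_build_header() below is called with TUNNEL_SEQ.)
	 */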
	version = md->version;
	tunnel_hlen = 8 + erspan_hdr_len(version);

	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
	if (!rt)
		goto err_free_skb;

	if (gre_handle_offloads(skb, false))
		goto err_free_rt;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	thoff = skb_transport_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
		truncate = true;

	if (version == 1) {
		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
				    ntohl(md->u.index), truncate, true);
	} else if (version == 2) {
		erspan_build_header_v2(skb,
				       ntohl(tunnel_id_to_key32(key->tun_id)),
				       md->u.md2.dir,
				       get_hwid(&md->u.md2),
				       truncate, true);
	} else {
		goto err_free_rt;
	}

	gre_build_header(skb, 8, TUNNEL_SEQ,
			 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	rt = gre_get_rt(skb, dev, &fl4, &info->key);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to the GRE header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
static netdev_tx_t erspan_xmit(struct sk_buff *skb,
			       struct net_device *dev)
	struct ip_tunnel *tunnel = netdev_priv(dev);
	bool truncate = false;

	if (tunnel->collect_md) {
		erspan_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, false))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	/* Push ERSPAN header */
	if (tunnel->erspan_ver == 1)
		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
				    tunnel->index,
				    truncate, true);
	else if (tunnel->erspan_ver == 2)
		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
				       tunnel->dir, tunnel->hwid,
				       truncate, true);
	else
		goto free_skb;

	tunnel->parms.o_flags &= ~TUNNEL_KEY;
	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
static void ipgre_link_update(struct net_device *dev, bool set_mtu)
	struct ip_tunnel *tunnel = netdev_priv(dev);

	len = tunnel->tun_hlen;
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	len = tunnel->tun_hlen - len;
	tunnel->hlen = tunnel->hlen + len;

	dev->needed_headroom = dev->needed_headroom + len;
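	/* 68 is the RFC 791 floor: every IPv4 module must handle a 68-byte
	 * datagram (a maximal 60-byte header plus an 8-byte fragment).
	 */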
	if (set_mtu)
		dev->mtu = max_t(int, dev->mtu - len, 68);

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    tunnel->encap.type == TUNNEL_ENCAP_NONE) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		} else {
			dev->features &= ~NETIF_F_GSO_SOFTWARE;
			dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		}
		dev->features |= NETIF_F_LLTX;
	} else {
		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
	}
static int ipgre_tunnel_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
	struct ip_tunnel_parm p;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

	err = ip_tunnel_ioctl(dev, &p, cmd);

	if (cmd == SIOCCHGTUNNEL) {
		struct ip_tunnel *t = netdev_priv(dev);

		t->parms.i_flags = p.i_flags;
		t->parms.o_flags = p.o_flags;

		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
			ipgre_link_update(dev, true);
	}

	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;
/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have an impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to the mbone,
   play with me :-)

	ping -t 255 224.66.66.66

   If nobody answers, the mbone does not work.

	ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
	ip addr add 10.66.66.<somewhat>/24 dev Universe
	ifconfig Universe add fe80::<Your_real_addr>/10
	ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96

	ftp fec0:6666:6666::193.233.7.65
 */
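
/* For reference, a modern iproute2 equivalent of the two ifconfig lines
 * above would be (same illustrative addresses; adjust to taste):
 *
 *	ip -6 addr add fe80::<Your_real_addr>/10 dev Universe
 *	ip -6 addr add fec0:6666:6666::<Your_real_addr>/96 dev Universe
 */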
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
	const struct iphdr *iph = (const struct iphdr *)skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};
#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;

		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}

static int ipgre_close(struct net_device *dev)
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
};

#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
	dev->netdev_ops = &ipgre_netdev_ops;
	dev->type = ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
static void __gre_tunnel_init(struct net_device *dev)
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences.
		 */
		dev->features |= NETIF_F_LLTX;
	}
static int ipgre_tunnel_init(struct net_device *dev)
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	netif_keep_dst(dev);

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
	}

	return ip_tunnel_init(dev);
static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);

static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit_batch = ipgre_exit_batch_net,
	.id = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
	__be16 flags = 0;

	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION | GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;
static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
	}

	return ipgre_tunnel_validate(tb, data, extack);
static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
	__be16 flags = 0;
	int ret;

	ret = ipgre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	/* ERSPAN should only have GRE sequence and key flag */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID is only 10 bits. Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
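	/* (e.g. with a 10-bit ID, valid sessions are 0..1023; assuming
	 * ID_MASK covers the low 10 bits, any key with higher bits set
	 * is rejected below.)
	 */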
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;
static int ipgre_netlink_parms(struct net_device *dev,
			       struct nlattr *data[],
			       struct nlattr *tb[],
			       struct ip_tunnel_parm *parms,
			       __u32 *fwmark)
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
		    && (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
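
	/* ERSPAN background for the checks below: version 1 (type II)
	 * carries a 20-bit index in its metadata, while version 2
	 * (type III) carries a direction bit and a 6-bit hardware ID,
	 * hence the INDEX_MASK, DIR_MASK and HWID_MASK range checks.
	 */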
	if (data[IFLA_GRE_ERSPAN_VER]) {
		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

		if (t->erspan_ver != 1 && t->erspan_ver != 2)
			return -EINVAL;
	}

	if (t->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
			if (t->index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (t->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}
		if (data[IFLA_GRE_ERSPAN_HWID]) {
			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}
/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
	memset(ipencap, 0, sizeof(*ipencap));

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}
static int gre_tap_init(struct net_device *dev)
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};
static int erspan_tunnel_init(struct net_device *dev)
	struct ip_tunnel *tunnel = netdev_priv(dev);

	tunnel->tun_hlen = 8;
	tunnel->parms.iph.protocol = IPPROTO_GRE;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->erspan_ver);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);

static const struct net_device_ops erspan_netdev_ops = {
	.ndo_init		= erspan_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= erspan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};
static void ipgre_tap_setup(struct net_device *dev)
	dev->netdev_ops	= &gre_tap_netdev_ops;
	dev->priv_flags	&= ~IFF_TX_SKB_SHARING;
	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);

bool is_gretap_dev(const struct net_device *dev)
	return dev->netdev_ops == &gre_tap_netdev_ops;
EXPORT_SYMBOL_GPL(is_gretap_dev);
static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);

		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
		ipgre_link_update(dev, !tb[IFLA_MTU]);
static size_t ipgre_get_size(const struct net_device *dev)
	return
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_VER */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_DIR */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_HWID */
		nla_total_size(2) +
		0;
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
		goto nla_put_failure;

	if (t->erspan_ver == 1) {
		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
			goto nla_put_failure;
	} else if (t->erspan_ver == 2) {
		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
			goto nla_put_failure;
		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
static void erspan_setup(struct net_device *dev)
	struct ip_tunnel *t = netdev_priv(dev);

	dev->netdev_ops = &erspan_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, erspan_net_id);
	t->erspan_ver = 1;
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops erspan_link_ops __read_mostly = {
	.kind		= "erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= erspan_setup,
	.validate	= erspan_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb);
	if (IS_ERR(dev))
		return dev;

	/* Configure a flow-based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
static int __net_init ipgre_tap_init_net(struct net *net)
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");

static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit_batch = ipgre_tap_exit_batch_net,
	.id = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __net_init erspan_init_net(struct net *net)
	return ip_tunnel_init_net(net, erspan_net_id,
				  &erspan_link_ops, "erspan0");

static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);

static struct pernet_operations erspan_net_ops = {
	.init = erspan_init_net,
	.exit_batch = erspan_exit_batch_net,
	.id = &erspan_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
static int __init ipgre_init(void)
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = register_pernet_device(&erspan_net_ops);
	if (err < 0)
		goto pnet_erspan_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&erspan_link_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

erspan_link_failed:
	rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
static void __exit ipgre_fini(void)
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	rtnl_link_unregister(&erspan_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
	unregister_pernet_device(&erspan_net_ops);

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");