/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>
/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause a complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it would require maintaining a new variable in
   EVERY skb, even when no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a
   percpu counter; since cpu migration is forbidden once we enter
   the first ndo_start_xmit(), a percpu count is sufficient. We
   force an exit if this counter reaches RECURSION_LIMIT (see the
   illustrative sketch after this comment).

   2. Networking dead loops would not kill routers, but they would
   really kill the network. The IP hop limit plays the role of
   "t->recursion" in this case, if we copy it from the packet being
   encapsulated to the upper header. It is a very good solution,
   but it introduces two problems:

   - Routing protocols that use packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the
     tunnel, so that this problem would be solved and traceroute
     output would be even more informative. This idea turned out to
     be wrong: only Linux complies with rfc1812 now (yes, guys,
     Linux is the only true router now :-)); all other routers
     (at least, in my neighbourhood) return only 8 bytes of payload.
     That is the end of it.

   Hence, if we want OSPF to work or traceroute to say something
   reasonable, we must search for another solution.

   One option is to parse the packet, trying to detect an inner
   encapsulation made by our node. This is difficult or even
   impossible, especially taking fragmentation into account. In
   short, ttl is not a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   and that is ALL. :-) Well, it does not remove the problem
   completely, but the exponential growth of network traffic is
   changed to linear growth (branches that exceed the pmtu are
   pruned) and the tunnel mtu rapidly degrades to a value < 68,
   where looping stops.
   Yes, it is not good if there exists a router in the loop which
   does not force DF, even when the encapsulating packets have DF
   set. But it is not our problem! Nobody could accuse us; we did
   all that we could. Even if it is your gated that injected the
   fatal route into the network, even if it was you who configured
   the fatal static route: you are innocent. :-)
 */
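/* Illustrative sketch only, not part of this driver: the core's
 * recursion guard works roughly like the pattern below. The names
 * example_xmit_recursion, EXAMPLE_RECURSION_LIMIT and
 * example_dev_xmit() are hypothetical; the real counter and limit
 * live in the core device transmit path, not in this file.
 */
#if 0
static DEFINE_PER_CPU(unsigned int, example_xmit_recursion);
#define EXAMPLE_RECURSION_LIMIT	8

static netdev_tx_t example_dev_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	netdev_tx_t ret;

	/* Migration is disabled inside the xmit path, so a plain
	 * percpu counter is enough to detect re-entry on this cpu.
	 */
	if (__this_cpu_read(example_xmit_recursion) > EXAMPLE_RECURSION_LIMIT) {
		kfree_skb(skb);	/* dead loop detected: drop silently */
		return NETDEV_TX_OK;
	}

	__this_cpu_inc(example_xmit_recursion);
	ret = dev->netdev_ops->ndo_start_xmit(skb, dev);
	__this_cpu_dec(example_xmit_recursion);

	return ret;
}
#endif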
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
				u32 id, u32 index,
				bool truncate, bool is_ipv4);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;
static void ipgre_err(struct sk_buff *skb, u32 info,
		      const struct tnl_ptk_info *tpi)
{
	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco "wise men" put the GRE key in the third word
	   of the GRE header. This makes it impossible to maintain even
	   soft state for keyed GRE tunnels with checksums enabled. Tell
	   them "thank you".

	   Well, I wonder, rfc1812 was written by a Cisco employee;
	   why the hell do these idiots break standards established
	   by themselves???
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;
	struct ip_tunnel *t;
	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH;
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return;
		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
		break;
	}
	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return;
#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6) &&
	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
					type, data_len))
		return;
#endif

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;
}
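/* gre_err() is the error handler registered for GREPROTO_CISCO (see
 * ipgre_protocol below). It re-parses the GRE header of the packet
 * quoted inside the ICMP error, handles PMTU updates and redirects
 * itself, and hands everything else to ipgre_err() above.
 */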
static void gre_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key in the third word
	 * of the GRE header. This makes it impossible to maintain even
	 * soft state for keyed GRE tunnels with checksums enabled.
	 * Tell them "thank you".
	 *
	 * Well, I wonder, rfc1812 was written by a Cisco employee;
	 * why the hell do these idiots break standards established
	 * by themselves???
	 */
	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;
	bool csum_err = false;

	if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
			     iph->ihl * 4) < 0) {
		if (!csum_err)		/* ignore csum errors. */
			return;
	}
	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
		return;
	}

	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
			      IPPROTO_GRE, 0);
		return;
	}

	ipgre_err(skb, info, &tpi);
}
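/* erspan_rcv() handles an incoming ERSPAN packet. The base GRE header
 * carries no key for ERSPAN, so the 10-bit session ID from the ERSPAN
 * base header is used as the lookup key; in collect_md mode the
 * version-specific metadata is copied into a metadata dst before the
 * packet is passed to ip_tunnel_rcv().
 */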
static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	struct erspan_metadata *pkt_md;
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;
	int ver;
	int len;
	itn = net_generic(net, erspan_net_id);
	len = gre_hdr_len + sizeof(*ershdr);

	/* Check that the base header is present. */
	if (unlikely(!pskb_may_pull(skb, len)))
		return PACKET_REJECT;
	iph = ip_hdr(skb);
	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
	ver = ershdr->ver;

	/* The original GRE header does not have a key field,
	 * so use the ERSPAN 10-bit session ID as the key.
	 */
	tpi->key = cpu_to_be32(get_session_id(ershdr));
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
				  tpi->flags | TUNNEL_KEY,
				  iph->saddr, iph->daddr, tpi->key);
	if (tunnel) {
		len = gre_hdr_len + erspan_hdr_len(ver);
		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
		pkt_md = (struct erspan_metadata *)(ershdr + 1);

		if (__iptunnel_pull_header(skb,
					   len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			goto drop;
		if (tunnel->collect_md) {
			struct ip_tunnel_info *info;
			struct erspan_metadata *md;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ip_tun_rx_dst(skb, flags,
						tun_id, sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);

			info = &tun_dst->u.tun_info;
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);
		}
		skb_reset_mac_header(skb);
		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);
	if (tunnel) {
		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		if (tunnel->dev->type != ARPHRD_NONE)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);
		if (tunnel->collect_md) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}
static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}
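/* gre_rcv() is the receive hook for GREPROTO_CISCO. It parses the GRE
 * header, dispatches ERSPAN protocols to erspan_rcv() and everything
 * else to ipgre_rcv(), and answers unclaimed packets with an ICMP
 * port-unreachable, mirroring what a host does for an unknown port.
 */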
static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

out:
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
			 htonl(tunnel->o_seqno));

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}
static struct rtable *gre_get_rt(struct sk_buff *skb,
				 struct net_device *dev,
				 struct flowi4 *fl,
				 const struct ip_tunnel_key *key)
{
	struct net *net = dev_net(dev);

	memset(fl, 0, sizeof(*fl));
	fl->daddr = key->u.ipv4.dst;
	fl->saddr = key->u.ipv4.src;
	fl->flowi4_tos = RT_TOS(key->tos);
	fl->flowi4_mark = skb->mark;
	fl->flowi4_proto = IPPROTO_GRE;

	return ip_route_output_key(net, fl);
}
static struct rtable *prepare_fb_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct flowi4 *fl,
				      int tunnel_hlen)
{
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	int min_headroom;
	bool use_cache;
	int err;

	tun_info = skb_tunnel_info(skb);
	key = &tun_info->key;
	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);

	if (use_cache)
		rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl->saddr);
	if (!rt) {
		rt = gre_get_rt(skb, dev, fl, key);
		if (IS_ERR(rt))
			goto err_free_skb;
		if (use_cache)
			dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst,
					  fl->saddr);
	}

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
		       + tunnel_hlen + sizeof(struct iphdr);
	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
		int head_delta = SKB_DATA_ALIGN(min_headroom -
						skb_headroom(skb));

		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
				       0, GFP_ATOMIC);
		if (unlikely(err))
			goto err_free_rt;
	}
	return rt;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NULL;
}
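/* gre_fb_xmit() is the transmit path for flow-based (collect_md) GRE
 * devices: the outer addresses, key and flags come from the per-packet
 * tunnel metadata rather than from the device configuration.
 */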
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct rtable *rt = NULL;
	struct flowi4 fl;
	int tunnel_hlen;
	__be16 df, flags;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
	if (!rt)
		return;

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_rt;

	flags = tun_info->key.tun_flags &
		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id),
			 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			   __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct erspan_metadata *md;
	struct rtable *rt = NULL;
	bool truncate = false;
	struct flowi4 fl;
	int tunnel_hlen;
	int version;
	__be16 df;
	int nhoff;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	md = ip_tunnel_info_opts(tun_info);
	if (!md)
		goto err_free_rt;

	/* ERSPAN has a fixed 8-byte GRE header */
	version = md->version;
	tunnel_hlen = 8 + erspan_hdr_len(version);

	rt = prepare_fb_xmit(skb, dev, &fl, tunnel_hlen);
	if (!rt)
		return;

	if (gre_handle_offloads(skb, false))
		goto err_free_rt;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	if (version == 1) {
		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
				    ntohl(md->u.index), truncate, true);
	} else if (version == 2) {
		erspan_build_header_v2(skb,
				       ntohl(tunnel_id_to_key32(key->tun_id)),
				       md->u.md2.dir,
				       get_hwid(&md->u.md2),
				       truncate, true);
	} else {
		goto err_free_rt;
	}

	gre_build_header(skb, 8, TUNNEL_SEQ,
			 htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++));

	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;

	iptunnel_xmit(skb->sk, rt, skb, fl.saddr, key->u.ipv4.dst, IPPROTO_GRE,
		      key->tos, key->ttl, df, false);
	return;

err_free_rt:
	ip_rt_put(rt);
err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}
static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	rt = gre_get_rt(skb, dev, &fl4, &info->key);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}
static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		/* Need space for new headers */
		if (skb_cow_head(skb, dev->needed_headroom -
				      (tunnel->hlen + sizeof(struct iphdr))))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to the GRE header.
		 */
		skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static netdev_tx_t erspan_xmit(struct sk_buff *skb,
			       struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	bool truncate = false;

	if (tunnel->collect_md) {
		erspan_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, false))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	/* Push ERSPAN header */
	if (tunnel->erspan_ver == 1)
		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
				    tunnel->index,
				    truncate, true);
	else if (tunnel->erspan_ver == 2)
		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
				       tunnel->dir, tunnel->hwid,
				       truncate, true);
	else
		goto free_skb;

	tunnel->parms.o_flags &= ~TUNNEL_KEY;
	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
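/* ipgre_link_update() recomputes the tunnel header length after the
 * output flags changed, adjusts needed_headroom and (if set_mtu) the
 * MTU by the delta, and re-evaluates whether software GSO and lockless
 * transmit (LLTX) can stay enabled: sequence numbers, or checksums
 * combined with encapsulation, rule them out.
 */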
static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int len;

	len = tunnel->tun_hlen;
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	len = tunnel->tun_hlen - len;
	tunnel->hlen = tunnel->hlen + len;

	dev->needed_headroom = dev->needed_headroom + len;
	if (set_mtu)
		dev->mtu = max_t(int, dev->mtu - len, 68);

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    tunnel->encap.type == TUNNEL_ENCAP_NONE) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		} else {
			dev->features &= ~NETIF_F_GSO_SOFTWARE;
			dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		}
		dev->features |= NETIF_F_LLTX;
	} else {
		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
	}
}
static int ipgre_tunnel_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	struct ip_tunnel_parm p;
	int err;

	if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
		return -EFAULT;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off & htons(~IP_DF)) ||
		    ((p.i_flags | p.o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
	p.o_flags = gre_flags_to_tnl_flags(p.o_flags);

	err = ip_tunnel_ioctl(dev, &p, cmd);
	if (err)
		return err;

	if (cmd == SIOCCHGTUNNEL) {
		struct ip_tunnel *t = netdev_priv(dev);

		t->parms.i_flags = p.i_flags;
		t->parms.o_flags = p.o_flags;

		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
			ipgre_link_update(dev, true);
	}

	p.i_flags = gre_tnl_flags_to_gre_flags(p.i_flags);
	p.o_flags = gre_tnl_flags_to_gre_flags(p.o_flags);

	if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}
/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.

   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to the mbone,
   play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, the mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}
static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};
#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif
static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
};
#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)
static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->type		= ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}
static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	int t_hlen;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;

	t_hlen = tunnel->hlen + sizeof(struct iphdr);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features    |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}
static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len		= 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
	}

	return ip_tunnel_init(dev);
}
static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};
static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit_batch = ipgre_exit_batch_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION | GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;

	return 0;
}
static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data, extack);
}
static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret;

	if (!data)
		return 0;

	ret = ipgre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	/* ERSPAN should only have GRE sequence and key flags */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID is only 10 bits wide. Since we reuse
	 * the 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	return 0;
}
static int ipgre_netlink_parms(struct net_device *dev,
			       struct nlattr *data[],
			       struct nlattr *tb[],
			       struct ip_tunnel_parm *parms,
			       __u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
		    && (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	if (data[IFLA_GRE_ERSPAN_VER]) {
		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

		if (t->erspan_ver != 1 && t->erspan_ver != 2)
			return -EINVAL;
	}

	if (t->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
			if (t->index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (t->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}
		if (data[IFLA_GRE_ERSPAN_HWID]) {
			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}
/* This function returns true when ENCAP attributes are present in the
 * netlink message.
 */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}
static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};
static int erspan_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int t_hlen;

	tunnel->tun_hlen = 8;
	tunnel->parms.iph.protocol = IPPROTO_GRE;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->erspan_ver);
	t_hlen = tunnel->hlen + sizeof(struct iphdr);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops erspan_netdev_ops = {
	.ndo_init		= erspan_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= erspan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};
static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops	= &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}

bool is_gretap_dev(const struct net_device *dev)
{
	return dev->netdev_ops == &gre_tap_netdev_ops;
}
EXPORT_SYMBOL_GPL(is_gretap_dev);
static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = 0;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);

		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}
static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_encap ipencap;
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;
	int err;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		err = ip_tunnel_encap_setup(t, &ipencap);
		if (err < 0)
			return err;
	}

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
		ipgre_link_update(dev, !tb[IFLA_MTU]);

	return 0;
}
static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_VER */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_DIR */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_HWID */
		nla_total_size(2) +
		0;
}
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(p->o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
		goto nla_put_failure;

	if (t->erspan_ver == 1) {
		if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
			goto nla_put_failure;
	} else if (t->erspan_ver == 2) {
		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
			goto nla_put_failure;
		if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static void erspan_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &erspan_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, erspan_net_id);
}
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops erspan_link_ops __read_mostly = {
	.kind		= "erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= erspan_setup,
	.validate	= erspan_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb);
	if (IS_ERR(dev))
		return dev;

	/* Configure a flow-based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit_batch = ipgre_tap_exit_batch_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __net_init erspan_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, erspan_net_id,
				  &erspan_link_ops, "erspan0");
}

static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
{
	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
}

static struct pernet_operations erspan_net_ops = {
	.init = erspan_init_net,
	.exit_batch = erspan_exit_batch_net,
	.id   = &erspan_net_id,
	.size = sizeof(struct ip_tunnel_net),
};
static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = register_pernet_device(&erspan_net_ops);
	if (err < 0)
		goto pnet_erspan_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&erspan_link_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

erspan_link_failed:
	rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}
static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	rtnl_link_unregister(&erspan_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
	unregister_pernet_device(&erspan_net_ops);
}
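/* Illustrative userspace usage of the three link types registered
 * below (hedged: exact iproute2 syntax varies by version; the
 * addresses are documentation examples, not real endpoints):
 *
 *   modprobe ip_gre
 *   ip link add gre1 type gre remote 192.0.2.1 local 192.0.2.2 ttl 255
 *   ip link add tap1 type gretap remote 192.0.2.1 local 192.0.2.2
 *   ip link add ers1 type erspan remote 192.0.2.1 local 192.0.2.2 \
 *           seq key 100 erspan 123
 */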
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");