1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* GTP according to GSM TS 09.60 / 3GPP TS 29.060
4 * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/module.h>
15 #include <linux/skbuff.h>
16 #include <linux/udp.h>
17 #include <linux/rculist.h>
18 #include <linux/jhash.h>
19 #include <linux/if_tunnel.h>
20 #include <linux/net.h>
21 #include <linux/file.h>
22 #include <linux/gtp.h>
24 #include <net/net_namespace.h>
25 #include <net/protocol.h>
29 #include <net/udp_tunnel.h>
32 #include <net/genetlink.h>
33 #include <net/netns/generic.h>
36 /* An active session for the subscriber. */
38 struct hlist_node hlist_tid;
39 struct hlist_node hlist_addr;
56 struct in6_addr addr6;
60 struct in6_addr addr6;
64 struct net_device *dev;
67 struct rcu_head rcu_head;
70 /* One instance of the GTP device. */
72 struct list_head list;
78 struct net_device *dev;
82 unsigned int hash_size;
83 struct hlist_head *tid_hash;
84 struct hlist_head *addr_hash;
101 static unsigned int gtp_net_id __read_mostly;
104 struct list_head gtp_dev_list;
107 static u32 gtp_h_initval;
109 static struct genl_family gtp_genl_family;
111 enum gtp_multicast_groups {
115 static const struct genl_multicast_group gtp_genl_mcgrps[] = {
116 [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
119 static void pdp_context_delete(struct pdp_ctx *pctx);
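/* Hash helpers: PDP contexts are looked up either by tunnel id (the 64-bit
 * GTPv0 TID or the 32-bit GTPv1 TEI) or by the mobile subscriber address.
 * gtp_h_initval is a random seed initialised once at module load, see
 * gtp_init().  For IPv6 only the upper 64 bits (the /64 prefix) of the MS
 * address feed the hash, matching ipv6_pdp_addr_equal() below.
 */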
121 static inline u32 gtp0_hashfn(u64 tid)
123 u32 *tid32 = (u32 *) &tid;
124 return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
127 static inline u32 gtp1u_hashfn(u32 tid)
129 return jhash_1word(tid, gtp_h_initval);
132 static inline u32 ipv4_hashfn(__be32 ip)
134 return jhash_1word((__force u32)ip, gtp_h_initval);
137 static u32 ipv6_hashfn(const struct in6_addr *ip6)
139 return jhash_2words((__force u32)ip6->s6_addr32[0],
140 (__force u32)ip6->s6_addr32[1], gtp_h_initval);
143 /* Resolve a PDP context structure based on the 64bit TID. */
144 static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid, u16 family)
146 struct hlist_head *head;
149 head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];
151 hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
152 if (pdp->af == family &&
153 pdp->gtp_version == GTP_V0 &&
154 pdp->u.v0.tid == tid)
160 /* Resolve a PDP context structure based on the 32bit TEI. */
161 static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid, u16 family)
163 struct hlist_head *head;
166 head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];
168 hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
169 if (pdp->af == family &&
170 pdp->gtp_version == GTP_V1 &&
171 pdp->u.v1.i_tei == tid)
177 /* Resolve a PDP context based on IPv4 address of MS. */
178 static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
180 struct hlist_head *head;
183 head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];
185 hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
186 if (pdp->af == AF_INET &&
187 pdp->ms.addr.s_addr == ms_addr)
194 /* 3GPP TS 29.060: PDN Connection: the association between a MS represented by
195 * [...] one IPv6 *prefix* and a PDN represented by an APN.
197 * Then, 3GPP TS 29.061, Section 11.2.1.3 says: The size of the prefix shall be
198 * according to the maximum prefix length for a global IPv6 address as
199 * specified in the IPv6 Addressing Architecture, see RFC 4291.
201 * Finally, RFC 4291 section 2.5.4 states: All Global Unicast addresses other
202 * than those that start with binary 000 have a 64-bit interface ID field
203 * (i.e., n + m = 64).
205 static bool ipv6_pdp_addr_equal(const struct in6_addr *a,
206 const struct in6_addr *b)
208 return a->s6_addr32[0] == b->s6_addr32[0] &&
209 a->s6_addr32[1] == b->s6_addr32[1];
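/* Example: under the /64 semantics above, 2001:db8:0:1::1 and
 * 2001:db8:0:1::2 identify the same subscriber; only the prefix half of
 * the address is compared.
 */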
212 static struct pdp_ctx *ipv6_pdp_find(struct gtp_dev *gtp,
213 const struct in6_addr *ms_addr)
215 struct hlist_head *head;
218 head = &gtp->addr_hash[ipv6_hashfn(ms_addr) % gtp->hash_size];
220 hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
221 if (pdp->af == AF_INET6 &&
222 ipv6_pdp_addr_equal(&pdp->ms.addr6, ms_addr))
229 static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
230 unsigned int hdrlen, unsigned int role)
234 if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
237 iph = (struct iphdr *)(skb->data + hdrlen);
239 if (role == GTP_ROLE_SGSN)
240 return iph->daddr == pctx->ms.addr.s_addr;
242 return iph->saddr == pctx->ms.addr.s_addr;
245 static bool gtp_check_ms_ipv6(struct sk_buff *skb, struct pdp_ctx *pctx,
246 unsigned int hdrlen, unsigned int role)
248 struct ipv6hdr *ip6h;
251 if (!pskb_may_pull(skb, hdrlen + sizeof(struct ipv6hdr)))
254 ip6h = (struct ipv6hdr *)(skb->data + hdrlen);
256 if ((ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL) ||
257 (ipv6_addr_type(&ip6h->daddr) & IPV6_ADDR_LINKLOCAL))
260 if (role == GTP_ROLE_SGSN) {
261 ret = ipv6_pdp_addr_equal(&ip6h->daddr, &pctx->ms.addr6);
263 ret = ipv6_pdp_addr_equal(&ip6h->saddr, &pctx->ms.addr6);
269 /* Check if the inner IP address in this packet is assigned to any
270 * existing mobile subscriber.
272 static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
273 unsigned int hdrlen, unsigned int role,
276 switch (inner_proto) {
278 return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
280 return gtp_check_ms_ipv6(skb, pctx, hdrlen, role);
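/* Peek at the first octet of the encapsulated payload; its top nibble gives
 * the inner IP version (0x4x or 0x6x), which is mapped to ETH_P_IP or
 * ETH_P_IPV6 for the caller.
 */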
285 static int gtp_inner_proto(struct sk_buff *skb, unsigned int hdrlen,
288 __u8 *ip_version, _ip_version;
290 ip_version = skb_header_pointer(skb, hdrlen, sizeof(*ip_version),
295 switch (*ip_version & 0xf0) {
297 *inner_proto = ETH_P_IP;
300 *inner_proto = ETH_P_IPV6;
309 static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
310 unsigned int hdrlen, unsigned int role, __u16 inner_proto)
312 if (!gtp_check_ms(skb, pctx, hdrlen, role, inner_proto)) {
313 netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
317 /* Get rid of the GTP + UDP headers. */
318 if (iptunnel_pull_header(skb, hdrlen, htons(inner_proto),
319 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) {
320 pctx->dev->stats.rx_length_errors++;
324 netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");
326 /* Now that the UDP and the GTP header have been removed, set up the
327 * new network header. This is required by the upper layer to
328 * calculate the transport header.
330 skb_reset_network_header(skb);
331 skb_reset_mac_header(skb);
333 skb->dev = pctx->dev;
335 dev_sw_netstats_rx_add(pctx->dev, skb->len);
341 pctx->dev->stats.rx_dropped++;
345 static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
346 const struct sock *sk,
347 __be32 daddr, __be32 saddr)
349 memset(fl4, 0, sizeof(*fl4));
350 fl4->flowi4_oif = sk->sk_bound_dev_if;
353 fl4->flowi4_tos = ip_sock_rt_tos(sk);
354 fl4->flowi4_scope = ip_sock_rt_scope(sk);
355 fl4->flowi4_proto = sk->sk_protocol;
357 return ip_route_output_key(sock_net(sk), fl4);
360 static struct rt6_info *ip6_route_output_gtp(struct net *net,
362 const struct sock *sk,
363 const struct in6_addr *daddr,
364 struct in6_addr *saddr)
366 struct dst_entry *dst;
368 memset(fl6, 0, sizeof(*fl6));
369 fl6->flowi6_oif = sk->sk_bound_dev_if;
372 fl6->flowi6_proto = sk->sk_protocol;
374 dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, fl6, NULL);
376 return ERR_PTR(-ENETUNREACH);
378 return (struct rt6_info *)dst;
382 * In all Path Management messages:
383 * - TID: is not used and shall be set to 0.
384 * - Flow Label is not used and shall be set to 0
385 * In signalling messages:
386 * - number: this field is not yet used in signalling messages.
387 * It shall be set to 255 by the sender and shall be ignored
389 * Returns true if the echo req was correct, false otherwise.
391 static bool gtp0_validate_echo_hdr(struct gtp0_header *gtp0)
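/* A valid echo request carries flags of exactly 0x1e, the same value that
 * gtp0_push_header() uses (v0, GTP-non-prime), hence the XOR test below.
 */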
393 return !(gtp0->tid || (gtp0->flags ^ 0x1e) ||
394 gtp0->number != 0xff || gtp0->flow);
397 /* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
398 static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type)
400 int len_pkt, len_hdr;
402 hdr->flags = 0x1e; /* v0, GTP-non-prime. */
403 hdr->type = msg_type;
404 /* GSM TS 09.60. 7.3 In all Path Management messages Flow Label and TID
405 * are not used and shall be set to 0.
410 hdr->spare[0] = 0xff;
411 hdr->spare[1] = 0xff;
412 hdr->spare[2] = 0xff;
414 len_pkt = sizeof(struct gtp0_packet);
415 len_hdr = sizeof(struct gtp0_header);
417 if (msg_type == GTP_ECHO_RSP)
418 hdr->length = htons(len_pkt - len_hdr);
423 static int gtp0_send_echo_resp_ip(struct gtp_dev *gtp, struct sk_buff *skb)
425 struct iphdr *iph = ip_hdr(skb);
429 /* find route to the sender,
430 * src address becomes dst address and vice versa.
432 rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr);
434 netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
439 udp_tunnel_xmit_skb(rt, gtp->sk0, skb,
440 fl4.saddr, fl4.daddr,
442 ip4_dst_hoplimit(&rt->dst),
444 htons(GTP0_PORT), htons(GTP0_PORT),
445 !net_eq(sock_net(gtp->sk0),
452 static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
454 struct gtp0_packet *gtp_pkt;
455 struct gtp0_header *gtp0;
458 gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
460 if (!gtp0_validate_echo_hdr(gtp0))
465 /* pull GTP and UDP headers */
466 skb_pull_data(skb, sizeof(struct gtp0_header) + sizeof(struct udphdr));
468 gtp_pkt = skb_push(skb, sizeof(struct gtp0_packet));
469 memset(gtp_pkt, 0, sizeof(struct gtp0_packet));
471 gtp0_build_echo_msg(&gtp_pkt->gtp0_h, GTP_ECHO_RSP);
473 /* GSM TS 09.60. 7.3 The Sequence Number in a signalling response
474 * message shall be copied from the signalling request message
475 * that the GSN is replying to.
477 gtp_pkt->gtp0_h.seq = seq;
479 gtp_pkt->ie.tag = GTPIE_RECOVERY;
480 gtp_pkt->ie.val = gtp->restart_count;
482 switch (gtp->sk0->sk_family) {
484 if (gtp0_send_echo_resp_ip(gtp, skb) < 0)
494 static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
495 int flags, u32 type, struct echo_info echo)
499 genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
504 if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) ||
505 nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer.addr.s_addr) ||
506 nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms.addr.s_addr))
509 genlmsg_end(skb, genlh);
513 genlmsg_cancel(skb, genlh);
517 static void gtp0_handle_echo_resp_ip(struct sk_buff *skb, struct echo_info *echo)
519 struct iphdr *iph = ip_hdr(skb);
521 echo->ms.addr.s_addr = iph->daddr;
522 echo->peer.addr.s_addr = iph->saddr;
523 echo->gtp_version = GTP_V0;
526 static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
528 struct gtp0_header *gtp0;
529 struct echo_info echo;
533 gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
535 if (!gtp0_validate_echo_hdr(gtp0))
538 switch (gtp->sk0->sk_family) {
540 gtp0_handle_echo_resp_ip(skb, &echo);
546 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
550 ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
556 return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
557 msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
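/* Map the inner payload ethertype (ETH_P_IP / ETH_P_IPV6) to the address
 * family (AF_INET / AF_INET6) used as the lookup key in the PDP hash tables.
 */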
560 static int gtp_proto_to_family(__u16 proto)
575 /* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
576 static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
578 unsigned int hdrlen = sizeof(struct udphdr) +
579 sizeof(struct gtp0_header);
580 struct gtp0_header *gtp0;
581 struct pdp_ctx *pctx;
584 if (!pskb_may_pull(skb, hdrlen))
587 gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
589 if ((gtp0->flags >> 5) != GTP_V0)
592 /* If the sockets were created in kernel, it means that
593 * there is no daemon running in userspace which would
594 * handle echo requests.
596 if (gtp0->type == GTP_ECHO_REQ && gtp->sk_created)
597 return gtp0_send_echo_resp(gtp, skb);
599 if (gtp0->type == GTP_ECHO_RSP && gtp->sk_created)
600 return gtp0_handle_echo_resp(gtp, skb);
602 if (gtp0->type != GTP_TPDU)
605 if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) {
606 netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n");
610 pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid),
611 gtp_proto_to_family(inner_proto));
613 netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
617 return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto);
620 /* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
621 static void gtp1u_build_echo_msg(struct gtp1_header_long *hdr, __u8 msg_type)
623 int len_pkt, len_hdr;
625 /* S flag must be set to 1 */
626 hdr->flags = 0x32; /* v1, GTP-non-prime. */
627 hdr->type = msg_type;
628 /* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */
631 /* seq, npdu and next should be counted in the length of the GTP packet,
632 * that's why the size of gtp1_header should be subtracted,
633 * not the size of gtp1_header_long.
636 len_hdr = sizeof(struct gtp1_header);
638 if (msg_type == GTP_ECHO_RSP) {
639 len_pkt = sizeof(struct gtp1u_packet);
640 hdr->length = htons(len_pkt - len_hdr);
642 /* GTP_ECHO_REQ does not carry a GTP Information Element,
643 * which is why gtp1_header_long is used here.
645 len_pkt = sizeof(struct gtp1_header_long);
646 hdr->length = htons(len_pkt - len_hdr);
650 static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
652 struct gtp1_header_long *gtp1u;
653 struct gtp1u_packet *gtp_pkt;
658 gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));
660 /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
661 * Error Indication and Supported Extension Headers Notification
662 * messages, the S flag shall be set to 1 and TEID shall be set to 0.
664 if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
667 /* pull GTP and UDP headers */
669 sizeof(struct gtp1_header_long) + sizeof(struct udphdr));
671 gtp_pkt = skb_push(skb, sizeof(struct gtp1u_packet));
672 memset(gtp_pkt, 0, sizeof(struct gtp1u_packet));
674 gtp1u_build_echo_msg(&gtp_pkt->gtp1u_h, GTP_ECHO_RSP);
676 /* 3GPP TS 29.281 7.7.2 - The Restart Counter value in the
677 * Recovery information element shall not be used, i.e. it shall
678 * be set to zero by the sender and shall be ignored by the receiver.
679 * The Recovery information element is mandatory due to backwards
680 * compatibility reasons.
682 gtp_pkt->ie.tag = GTPIE_RECOVERY;
687 /* find route to the sender,
688 * src address becomes dst address and vice versa.
690 rt = ip4_route_output_gtp(&fl4, gtp->sk1u, iph->saddr, iph->daddr);
692 netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
697 udp_tunnel_xmit_skb(rt, gtp->sk1u, skb,
698 fl4.saddr, fl4.daddr,
700 ip4_dst_hoplimit(&rt->dst),
702 htons(GTP1U_PORT), htons(GTP1U_PORT),
703 !net_eq(sock_net(gtp->sk1u),
709 static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
711 struct gtp1_header_long *gtp1u;
712 struct echo_info echo;
717 gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));
719 /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
720 * Error Indication and Supported Extension Headers Notification
721 * messages, the S flag shall be set to 1 and TEID shall be set to 0.
723 if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
727 echo.ms.addr.s_addr = iph->daddr;
728 echo.peer.addr.s_addr = iph->saddr;
729 echo.gtp_version = GTP_V1;
731 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
735 ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
741 return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
742 msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
745 static int gtp_parse_exthdrs(struct sk_buff *skb, unsigned int *hdrlen)
747 struct gtp_ext_hdr *gtp_exthdr, _gtp_exthdr;
748 unsigned int offset = *hdrlen;
749 __u8 *next_type, _next_type;
751 /* From 29.060: "The Extension Header Length field specifies the length
752 * of the particular Extension header in 4 octets units."
754 * This length field includes length field size itself (1 byte),
755 * payload (variable length) and next type (1 byte). The extension
756 * header is aligned to 4 bytes.
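 *
 * A minimal extension header therefore occupies 4 octets:
 *   [ length | 2 octets of content | next extension header type ]
 * and the next-type octet is always the last octet of the header, which is
 * why it is read back at (offset - 1) below.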
760 gtp_exthdr = skb_header_pointer(skb, offset, sizeof(*gtp_exthdr),
762 if (!gtp_exthdr || !gtp_exthdr->len)
765 offset += gtp_exthdr->len * 4;
767 /* From 29.060: "If no such Header follows, then the value of
768 * the Next Extension Header Type shall be 0."
770 next_type = skb_header_pointer(skb, offset - 1,
771 sizeof(_next_type), &_next_type);
775 } while (*next_type != 0);
782 static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
784 unsigned int hdrlen = sizeof(struct udphdr) +
785 sizeof(struct gtp1_header);
786 struct gtp1_header *gtp1;
787 struct pdp_ctx *pctx;
790 if (!pskb_may_pull(skb, hdrlen))
793 gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
795 if ((gtp1->flags >> 5) != GTP_V1)
798 /* If the sockets were created in kernel, it means that
799 * there is no daemon running in userspace which would
800 * handle echo requests.
802 if (gtp1->type == GTP_ECHO_REQ && gtp->sk_created)
803 return gtp1u_send_echo_resp(gtp, skb);
805 if (gtp1->type == GTP_ECHO_RSP && gtp->sk_created)
806 return gtp1u_handle_echo_resp(gtp, skb);
808 if (gtp1->type != GTP_TPDU)
811 /* From 29.060: "This field shall be present if and only if any one or
812 * more of the S, PN and E flags are set.".
814 * If any of these bits is set, then the remaining ones also have to be
817 if (gtp1->flags & GTP1_F_MASK)
820 /* Make sure the header is large enough, including extensions. */
821 if (!pskb_may_pull(skb, hdrlen))
824 if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) {
825 netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n");
829 gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
831 pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid),
832 gtp_proto_to_family(inner_proto));
834 netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
838 if (gtp1->flags & GTP1_F_EXTHDR &&
839 gtp_parse_exthdrs(skb, &hdrlen) < 0)
842 return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto);
845 static void __gtp_encap_destroy(struct sock *sk)
850 gtp = sk->sk_user_data;
856 WRITE_ONCE(udp_sk(sk)->encap_type, 0);
857 rcu_assign_sk_user_data(sk, NULL);
865 static void gtp_encap_destroy(struct sock *sk)
868 __gtp_encap_destroy(sk);
872 static void gtp_encap_disable_sock(struct sock *sk)
877 __gtp_encap_destroy(sk);
880 static void gtp_encap_disable(struct gtp_dev *gtp)
882 if (gtp->sk_created) {
883 udp_tunnel_sock_release(gtp->sk0->sk_socket);
884 udp_tunnel_sock_release(gtp->sk1u->sk_socket);
885 gtp->sk_created = false;
889 gtp_encap_disable_sock(gtp->sk0);
890 gtp_encap_disable_sock(gtp->sk1u);
894 /* UDP encapsulation receive handler. See net/ipv4/udp.c.
895 * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
897 static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
902 gtp = rcu_dereference_sk_user_data(sk);
906 netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
908 switch (READ_ONCE(udp_sk(sk)->encap_type)) {
910 netdev_dbg(gtp->dev, "received GTP0 packet\n");
911 ret = gtp0_udp_encap_recv(gtp, skb);
913 case UDP_ENCAP_GTP1U:
914 netdev_dbg(gtp->dev, "received GTP1U packet\n");
915 ret = gtp1u_udp_encap_recv(gtp, skb);
918 ret = -1; /* Shouldn't happen. */
923 netdev_dbg(gtp->dev, "pass up to the process\n");
928 netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
937 static void gtp_dev_uninit(struct net_device *dev)
939 struct gtp_dev *gtp = netdev_priv(dev);
941 gtp_encap_disable(gtp);
944 static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
946 int payload_len = skb->len;
947 struct gtp0_header *gtp0;
949 gtp0 = skb_push(skb, sizeof(*gtp0));
951 gtp0->flags = 0x1e; /* v0, GTP-non-prime. */
952 gtp0->type = GTP_TPDU;
953 gtp0->length = htons(payload_len);
954 gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
955 gtp0->flow = htons(pctx->u.v0.flow);
957 gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0xff;
958 gtp0->tid = cpu_to_be64(pctx->u.v0.tid);
961 static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
963 int payload_len = skb->len;
964 struct gtp1_header *gtp1;
966 gtp1 = skb_push(skb, sizeof(*gtp1));
968 /* Bits 8 7 6 5 4 3 2 1
969 * +--+--+--+--+--+--+--+--+
970 * |version |PT| 0| E| S|PN|
971 * +--+--+--+--+--+--+--+--+
974 gtp1->flags = 0x30; /* v1, GTP-non-prime. */
975 gtp1->type = GTP_TPDU;
976 gtp1->length = htons(payload_len);
977 gtp1->tid = htonl(pctx->u.v1.o_tei);
979 /* TODO: Support for extension header, sequence number and N-PDU.
980 * Update the length field if any of them is available.
992 struct rt6_info *rt6;
994 struct pdp_ctx *pctx;
995 struct net_device *dev;
1000 static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
1002 switch (pktinfo->pctx->gtp_version) {
1004 pktinfo->gtph_port = htons(GTP0_PORT);
1005 gtp0_push_header(skb, pktinfo->pctx);
1008 pktinfo->gtph_port = htons(GTP1U_PORT);
1009 gtp1_push_header(skb, pktinfo->pctx);
1014 static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
1015 struct sock *sk, __u8 tos,
1016 struct pdp_ctx *pctx, struct rtable *rt,
1018 struct net_device *dev)
1022 pktinfo->pctx = pctx;
1024 pktinfo->fl4 = *fl4;
1028 static void gtp_set_pktinfo_ipv6(struct gtp_pktinfo *pktinfo,
1029 struct sock *sk, __u8 tos,
1030 struct pdp_ctx *pctx, struct rt6_info *rt6,
1032 struct net_device *dev)
1036 pktinfo->pctx = pctx;
1038 pktinfo->fl6 = *fl6;
1042 static int gtp_build_skb_outer_ip4(struct sk_buff *skb, struct net_device *dev,
1043 struct gtp_pktinfo *pktinfo,
1044 struct pdp_ctx *pctx, __u8 tos,
1052 rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer.addr.s_addr,
1053 inet_sk(pctx->sk)->inet_saddr);
1055 netdev_dbg(dev, "no route to SGSN %pI4\n",
1056 &pctx->peer.addr.s_addr);
1057 dev->stats.tx_carrier_errors++;
1061 if (rt->dst.dev == dev) {
1062 netdev_dbg(dev, "circular route to SGSN %pI4\n",
1063 &pctx->peer.addr.s_addr);
1064 dev->stats.collisions++;
1068 /* This is similar to tnl_update_pmtu(). */
1071 mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
1072 sizeof(struct iphdr) - sizeof(struct udphdr);
1073 switch (pctx->gtp_version) {
1075 mtu -= sizeof(struct gtp0_header);
1078 mtu -= sizeof(struct gtp1_header);
1082 mtu = dst_mtu(&rt->dst);
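/* Illustrative arithmetic: with a 1500-byte route MTU the inner packet may
 * carry 1500 - 20 (IPv4) - 8 (UDP) - 20 (GTPv0 header) = 1452 bytes, or
 * 1500 - 20 - 8 - 8 (GTPv1 header) = 1464 bytes.
 */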
1085 skb_dst_update_pmtu_no_confirm(skb, mtu);
1087 if (frag_off & htons(IP_DF) &&
1088 ((!skb_is_gso(skb) && skb->len > mtu) ||
1089 (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
1090 netdev_dbg(dev, "packet too big, fragmentation needed\n");
1091 icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
1096 gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, tos, pctx, rt, &fl4, dev);
1097 gtp_push_header(skb, pktinfo);
1106 static int gtp_build_skb_outer_ip6(struct net *net, struct sk_buff *skb,
1107 struct net_device *dev,
1108 struct gtp_pktinfo *pktinfo,
1109 struct pdp_ctx *pctx, __u8 tos)
1111 struct dst_entry *dst;
1112 struct rt6_info *rt;
1116 rt = ip6_route_output_gtp(net, &fl6, pctx->sk, &pctx->peer.addr6,
1117 &inet6_sk(pctx->sk)->saddr);
1119 netdev_dbg(dev, "no route to SGSN %pI6\n",
1121 dev->stats.tx_carrier_errors++;
1126 if (rt->dst.dev == dev) {
1127 netdev_dbg(dev, "circular route to SGSN %pI6\n",
1129 dev->stats.collisions++;
1133 mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
1134 sizeof(struct ipv6hdr) - sizeof(struct udphdr);
1135 switch (pctx->gtp_version) {
1137 mtu -= sizeof(struct gtp0_header);
1140 mtu -= sizeof(struct gtp1_header);
1144 skb_dst_update_pmtu_no_confirm(skb, mtu);
1146 if ((!skb_is_gso(skb) && skb->len > mtu) ||
1147 (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))) {
1148 netdev_dbg(dev, "packet too big, fragmentation needed\n");
1149 icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1153 gtp_set_pktinfo_ipv6(pktinfo, pctx->sk, tos, pctx, rt, &fl6, dev);
1154 gtp_push_header(skb, pktinfo);
1163 static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
1164 struct gtp_pktinfo *pktinfo)
1166 struct gtp_dev *gtp = netdev_priv(dev);
1167 struct net *net = gtp->net;
1168 struct pdp_ctx *pctx;
1172 /* Read the IP destination address and resolve the PDP context.
1173 * Prepend PDP header with TEI/TID from PDP ctx.
1176 if (gtp->role == GTP_ROLE_SGSN)
1177 pctx = ipv4_pdp_find(gtp, iph->saddr);
1179 pctx = ipv4_pdp_find(gtp, iph->daddr);
1182 netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
1186 netdev_dbg(dev, "found PDP context %p\n", pctx);
1188 switch (pctx->sk->sk_family) {
1190 ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx,
1191 iph->tos, iph->frag_off);
1194 ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx,
1206 netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n",
1207 &iph->saddr, &iph->daddr);
1212 static int gtp_build_skb_ip6(struct sk_buff *skb, struct net_device *dev,
1213 struct gtp_pktinfo *pktinfo)
1215 struct gtp_dev *gtp = netdev_priv(dev);
1216 struct net *net = gtp->net;
1217 struct pdp_ctx *pctx;
1218 struct ipv6hdr *ip6h;
1222 /* Read the IP destination address and resolve the PDP context.
1223 * Prepend PDP header with TEI/TID from PDP ctx.
1225 ip6h = ipv6_hdr(skb);
1226 if (gtp->role == GTP_ROLE_SGSN)
1227 pctx = ipv6_pdp_find(gtp, &ip6h->saddr);
1229 pctx = ipv6_pdp_find(gtp, &ip6h->daddr);
1232 netdev_dbg(dev, "no PDP ctx found for %pI6, skip\n",
1236 netdev_dbg(dev, "found PDP context %p\n", pctx);
1238 tos = ipv6_get_dsfield(ip6h);
1240 switch (pctx->sk->sk_family) {
1242 ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx, tos, 0);
1245 ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx, tos);
1256 netdev_dbg(dev, "gtp -> IP src: %pI6 dst: %pI6\n",
1257 &ip6h->saddr, &ip6h->daddr);
1262 static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
1264 unsigned int proto = ntohs(skb->protocol);
1265 struct gtp_pktinfo pktinfo;
1268 /* Ensure there is sufficient headroom. */
1269 if (skb_cow_head(skb, dev->needed_headroom))
1272 skb_reset_inner_headers(skb);
1274 /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
1278 err = gtp_build_skb_ip4(skb, dev, &pktinfo);
1281 err = gtp_build_skb_ip6(skb, dev, &pktinfo);
1292 switch (pktinfo.pctx->sk->sk_family) {
1294 udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
1295 pktinfo.fl4.saddr, pktinfo.fl4.daddr,
1297 ip4_dst_hoplimit(&pktinfo.rt->dst),
1299 pktinfo.gtph_port, pktinfo.gtph_port,
1300 !net_eq(sock_net(pktinfo.pctx->sk),
1305 #if IS_ENABLED(CONFIG_IPV6)
1306 udp_tunnel6_xmit_skb(&pktinfo.rt6->dst, pktinfo.sk, skb, dev,
1307 &pktinfo.fl6.saddr, &pktinfo.fl6.daddr,
1309 ip6_dst_hoplimit(&pktinfo.rt6->dst),
1311 pktinfo.gtph_port, pktinfo.gtph_port,
1319 return NETDEV_TX_OK;
1321 dev->stats.tx_errors++;
1323 return NETDEV_TX_OK;
1326 static const struct net_device_ops gtp_netdev_ops = {
1327 .ndo_uninit = gtp_dev_uninit,
1328 .ndo_start_xmit = gtp_dev_xmit,
1331 static const struct device_type gtp_type = {
1335 #define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header))
1336 #define GTP_IPV4_MAXLEN (sizeof(struct iphdr) + GTP_TH_MAXLEN)
1338 static void gtp_link_setup(struct net_device *dev)
1340 struct gtp_dev *gtp = netdev_priv(dev);
1342 dev->netdev_ops = &gtp_netdev_ops;
1343 dev->needs_free_netdev = true;
1344 SET_NETDEV_DEVTYPE(dev, &gtp_type);
1346 dev->hard_header_len = 0;
1348 dev->mtu = ETH_DATA_LEN - GTP_IPV4_MAXLEN;
1350 /* Zero header length. */
1351 dev->type = ARPHRD_NONE;
1352 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1354 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
1355 dev->priv_flags |= IFF_NO_QUEUE;
1356 dev->features |= NETIF_F_LLTX;
1357 netif_keep_dst(dev);
1359 dev->needed_headroom = LL_MAX_HEADER + GTP_IPV4_MAXLEN;
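/* GTP_IPV4_MAXLEN covers the worst-case outer encapsulation: IPv4 + UDP +
 * the larger GTPv0 header.  gtp_newlink() lowers the MTU and raises
 * needed_headroom again when an IPv6 outer socket is used (GTP_IPV6_MAXLEN).
 */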
1363 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
1364 static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
1366 static void gtp_destructor(struct net_device *dev)
1368 struct gtp_dev *gtp = netdev_priv(dev);
1370 kfree(gtp->addr_hash);
1371 kfree(gtp->tid_hash);
1374 static int gtp_sock_udp_config(struct udp_port_cfg *udp_conf,
1375 const struct nlattr *nla, int family)
1377 udp_conf->family = family;
1379 switch (udp_conf->family) {
1381 udp_conf->local_ip.s_addr = nla_get_be32(nla);
1383 #if IS_ENABLED(CONFIG_IPV6)
1385 udp_conf->local_ip6 = nla_get_in6_addr(nla);
1395 static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp,
1396 const struct nlattr *nla, int family)
1398 struct udp_tunnel_sock_cfg tuncfg = {};
1399 struct udp_port_cfg udp_conf = {};
1400 struct net *net = gtp->net;
1401 struct socket *sock;
1405 err = gtp_sock_udp_config(&udp_conf, nla, family);
1407 return ERR_PTR(err);
1409 udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
1410 udp_conf.family = AF_INET;
1413 if (type == UDP_ENCAP_GTP0)
1414 udp_conf.local_udp_port = htons(GTP0_PORT);
1415 else if (type == UDP_ENCAP_GTP1U)
1416 udp_conf.local_udp_port = htons(GTP1U_PORT);
1418 return ERR_PTR(-EINVAL);
1420 err = udp_sock_create(net, &udp_conf, &sock);
1422 return ERR_PTR(err);
1424 tuncfg.sk_user_data = gtp;
1425 tuncfg.encap_type = type;
1426 tuncfg.encap_rcv = gtp_encap_recv;
1427 tuncfg.encap_destroy = NULL;
1429 setup_udp_tunnel_sock(net, sock, &tuncfg);
1434 static int gtp_create_sockets(struct gtp_dev *gtp, const struct nlattr *nla,
1440 sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp, nla, family);
1442 return PTR_ERR(sk0);
1444 sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp, nla, family);
1446 udp_tunnel_sock_release(sk0->sk_socket);
1447 return PTR_ERR(sk1u);
1450 gtp->sk_created = true;
1457 #define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header))
1458 #define GTP_IPV6_MAXLEN (sizeof(struct ipv6hdr) + GTP_TH_MAXLEN)
1460 static int gtp_newlink(struct net *src_net, struct net_device *dev,
1461 struct nlattr *tb[], struct nlattr *data[],
1462 struct netlink_ext_ack *extack)
1464 unsigned int role = GTP_ROLE_GGSN;
1465 struct gtp_dev *gtp;
1469 #if !IS_ENABLED(CONFIG_IPV6)
1470 if (data[IFLA_GTP_LOCAL6])
1471 return -EAFNOSUPPORT;
1474 gtp = netdev_priv(dev);
1476 if (!data[IFLA_GTP_PDP_HASHSIZE]) {
1479 hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
1484 if (data[IFLA_GTP_ROLE]) {
1485 role = nla_get_u32(data[IFLA_GTP_ROLE]);
1486 if (role > GTP_ROLE_SGSN)
1491 if (!data[IFLA_GTP_RESTART_COUNT])
1492 gtp->restart_count = 0;
1494 gtp->restart_count = nla_get_u8(data[IFLA_GTP_RESTART_COUNT]);
1498 err = gtp_hashtable_new(gtp, hashsize);
1502 if (data[IFLA_GTP_CREATE_SOCKETS]) {
1503 if (data[IFLA_GTP_LOCAL6])
1504 err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL6], AF_INET6);
1506 err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL], AF_INET);
1508 err = gtp_encap_enable(gtp, data);
1514 if ((gtp->sk0 && gtp->sk0->sk_family == AF_INET6) ||
1515 (gtp->sk1u && gtp->sk1u->sk_family == AF_INET6)) {
1516 dev->mtu = ETH_DATA_LEN - GTP_IPV6_MAXLEN;
1517 dev->needed_headroom = LL_MAX_HEADER + GTP_IPV6_MAXLEN;
1520 err = register_netdevice(dev);
1522 netdev_dbg(dev, "failed to register new netdev %d\n", err);
1526 gn = net_generic(dev_net(dev), gtp_net_id);
1527 list_add_rcu(&gtp->list, &gn->gtp_dev_list);
1528 dev->priv_destructor = gtp_destructor;
1530 netdev_dbg(dev, "registered new GTP interface\n");
1535 gtp_encap_disable(gtp);
1537 kfree(gtp->addr_hash);
1538 kfree(gtp->tid_hash);
1542 static void gtp_dellink(struct net_device *dev, struct list_head *head)
1544 struct gtp_dev *gtp = netdev_priv(dev);
1545 struct hlist_node *next;
1546 struct pdp_ctx *pctx;
1549 for (i = 0; i < gtp->hash_size; i++)
1550 hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
1551 pdp_context_delete(pctx);
1553 list_del_rcu(&gtp->list);
1554 unregister_netdevice_queue(dev, head);
1557 static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
1558 [IFLA_GTP_FD0] = { .type = NLA_U32 },
1559 [IFLA_GTP_FD1] = { .type = NLA_U32 },
1560 [IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 },
1561 [IFLA_GTP_ROLE] = { .type = NLA_U32 },
1562 [IFLA_GTP_CREATE_SOCKETS] = { .type = NLA_U8 },
1563 [IFLA_GTP_RESTART_COUNT] = { .type = NLA_U8 },
1564 [IFLA_GTP_LOCAL] = { .type = NLA_U32 },
1565 [IFLA_GTP_LOCAL6] = { .len = sizeof(struct in6_addr) },
1568 static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
1569 struct netlink_ext_ack *extack)
1577 static size_t gtp_get_size(const struct net_device *dev)
1579 return nla_total_size(sizeof(__u32)) + /* IFLA_GTP_PDP_HASHSIZE */
1580 nla_total_size(sizeof(__u32)) + /* IFLA_GTP_ROLE */
1581 nla_total_size(sizeof(__u8)); /* IFLA_GTP_RESTART_COUNT */
1584 static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
1586 struct gtp_dev *gtp = netdev_priv(dev);
1588 if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
1589 goto nla_put_failure;
1590 if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role))
1591 goto nla_put_failure;
1592 if (nla_put_u8(skb, IFLA_GTP_RESTART_COUNT, gtp->restart_count))
1593 goto nla_put_failure;
1601 static struct rtnl_link_ops gtp_link_ops __read_mostly = {
1603 .maxtype = IFLA_GTP_MAX,
1604 .policy = gtp_policy,
1605 .priv_size = sizeof(struct gtp_dev),
1606 .setup = gtp_link_setup,
1607 .validate = gtp_validate,
1608 .newlink = gtp_newlink,
1609 .dellink = gtp_dellink,
1610 .get_size = gtp_get_size,
1611 .fill_info = gtp_fill_info,
1614 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
1618 gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
1619 GFP_KERNEL | __GFP_NOWARN);
1620 if (gtp->addr_hash == NULL)
1623 gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
1624 GFP_KERNEL | __GFP_NOWARN);
1625 if (gtp->tid_hash == NULL)
1628 gtp->hash_size = hsize;
1630 for (i = 0; i < hsize; i++) {
1631 INIT_HLIST_HEAD(&gtp->addr_hash[i]);
1632 INIT_HLIST_HEAD(&gtp->tid_hash[i]);
1636 kfree(gtp->addr_hash);
1640 static struct sock *gtp_encap_enable_socket(int fd, int type,
1641 struct gtp_dev *gtp)
1643 struct udp_tunnel_sock_cfg tuncfg = {NULL};
1644 struct socket *sock;
1648 pr_debug("enable gtp on %d, %d\n", fd, type);
1650 sock = sockfd_lookup(fd, &err);
1652 pr_debug("gtp socket fd=%d not found\n", fd);
1657 if (sk->sk_protocol != IPPROTO_UDP ||
1658 sk->sk_type != SOCK_DGRAM ||
1659 (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
1660 pr_debug("socket fd=%d not UDP\n", fd);
1661 sk = ERR_PTR(-EINVAL);
1665 if (sk->sk_family == AF_INET6 &&
1667 sk = ERR_PTR(-EADDRNOTAVAIL);
1672 if (sk->sk_user_data) {
1673 sk = ERR_PTR(-EBUSY);
1679 tuncfg.sk_user_data = gtp;
1680 tuncfg.encap_type = type;
1681 tuncfg.encap_rcv = gtp_encap_recv;
1682 tuncfg.encap_destroy = gtp_encap_destroy;
1684 setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
1687 release_sock(sock->sk);
1693 static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
1695 struct sock *sk1u = NULL;
1696 struct sock *sk0 = NULL;
1698 if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
1701 if (data[IFLA_GTP_FD0]) {
1702 u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
1704 sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
1706 return PTR_ERR(sk0);
1709 if (data[IFLA_GTP_FD1]) {
1710 u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
1712 sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
1714 gtp_encap_disable_sock(sk0);
1715 return PTR_ERR(sk1u);
1723 sk0->sk_family != sk1u->sk_family) {
1724 gtp_encap_disable_sock(sk0);
1725 gtp_encap_disable_sock(sk1u);
1732 static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
1734 struct gtp_dev *gtp = NULL;
1735 struct net_device *dev;
1738 /* Examine the link attributes and figure out which network namespace
1739 * we are talking about.
1741 if (nla[GTPA_NET_NS_FD])
1742 net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD]));
1744 net = get_net(src_net);
1749 /* Check if there's an existing gtpX device to configure */
1750 dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
1751 if (dev && dev->netdev_ops == &gtp_netdev_ops)
1752 gtp = netdev_priv(dev);
1758 static void gtp_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
1760 pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
1762 switch (pctx->gtp_version) {
1764 /* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
1765 * label needs to be the same for uplink and downlink packets,
1766 * so let's annotate this.
1768 pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
1769 pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
1772 pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
1773 pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
1780 static void ip_pdp_peer_fill(struct pdp_ctx *pctx, struct genl_info *info)
1782 if (info->attrs[GTPA_PEER_ADDRESS]) {
1783 pctx->peer.addr.s_addr =
1784 nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
1785 } else if (info->attrs[GTPA_PEER_ADDR6]) {
1786 pctx->peer.addr6 = nla_get_in6_addr(info->attrs[GTPA_PEER_ADDR6]);
1790 static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
1792 ip_pdp_peer_fill(pctx, info);
1793 pctx->ms.addr.s_addr =
1794 nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
1795 gtp_pdp_fill(pctx, info);
1798 static bool ipv6_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
1800 ip_pdp_peer_fill(pctx, info);
1801 pctx->ms.addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]);
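/* Only the /64 prefix of the MS address is meaningful (see
 * ipv6_pdp_addr_equal()); reject addresses with any interface-id bits set.
 */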
1802 if (pctx->ms.addr6.s6_addr32[2] ||
1803 pctx->ms.addr6.s6_addr32[3])
1806 gtp_pdp_fill(pctx, info);
1811 static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
1812 struct genl_info *info)
1814 struct pdp_ctx *pctx, *pctx_tid = NULL;
1815 struct net_device *dev = gtp->dev;
1816 u32 hash_ms, hash_tid = 0;
1817 struct in6_addr ms_addr6;
1818 unsigned int version;
1823 version = nla_get_u32(info->attrs[GTPA_VERSION]);
1825 if (info->attrs[GTPA_FAMILY])
1826 family = nla_get_u8(info->attrs[GTPA_FAMILY]);
1830 #if !IS_ENABLED(CONFIG_IPV6)
1831 if (family == AF_INET6)
1832 return ERR_PTR(-EAFNOSUPPORT);
1834 if (!info->attrs[GTPA_PEER_ADDRESS] &&
1835 !info->attrs[GTPA_PEER_ADDR6])
1836 return ERR_PTR(-EINVAL);
1838 if ((info->attrs[GTPA_PEER_ADDRESS] &&
1839 sk->sk_family == AF_INET6) ||
1840 (info->attrs[GTPA_PEER_ADDR6] &&
1841 sk->sk_family == AF_INET))
1842 return ERR_PTR(-EAFNOSUPPORT);
1846 if (!info->attrs[GTPA_MS_ADDRESS] ||
1847 info->attrs[GTPA_MS_ADDR6])
1848 return ERR_PTR(-EINVAL);
1850 ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
1851 hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
1852 pctx = ipv4_pdp_find(gtp, ms_addr);
1855 if (!info->attrs[GTPA_MS_ADDR6] ||
1856 info->attrs[GTPA_MS_ADDRESS])
1857 return ERR_PTR(-EINVAL);
1859 ms_addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]);
1860 hash_ms = ipv6_hashfn(&ms_addr6) % gtp->hash_size;
1861 pctx = ipv6_pdp_find(gtp, &ms_addr6);
1864 return ERR_PTR(-EAFNOSUPPORT);
1868 if (version == GTP_V0)
1869 pctx_tid = gtp0_pdp_find(gtp,
1870 nla_get_u64(info->attrs[GTPA_TID]),
1872 else if (version == GTP_V1)
1873 pctx_tid = gtp1_pdp_find(gtp,
1874 nla_get_u32(info->attrs[GTPA_I_TEI]),
1880 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
1881 return ERR_PTR(-EEXIST);
1882 if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
1883 return ERR_PTR(-EOPNOTSUPP);
1885 if (pctx && pctx_tid)
1886 return ERR_PTR(-EEXIST);
1892 ipv4_pdp_fill(pctx, info);
1895 if (!ipv6_pdp_fill(pctx, info))
1896 return ERR_PTR(-EADDRNOTAVAIL);
1900 if (pctx->gtp_version == GTP_V0)
1901 netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
1902 pctx->u.v0.tid, pctx);
1903 else if (pctx->gtp_version == GTP_V1)
1904 netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
1905 pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
1911 pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
1913 return ERR_PTR(-ENOMEM);
1917 pctx->dev = gtp->dev;
1922 if (!info->attrs[GTPA_MS_ADDRESS]) {
1925 return ERR_PTR(-EINVAL);
1928 ipv4_pdp_fill(pctx, info);
1931 if (!info->attrs[GTPA_MS_ADDR6]) {
1934 return ERR_PTR(-EINVAL);
1937 if (!ipv6_pdp_fill(pctx, info)) {
1940 return ERR_PTR(-EADDRNOTAVAIL);
1944 atomic_set(&pctx->tx_seq, 0);
1946 switch (pctx->gtp_version) {
1948 /* TS 09.60: "The flow label identifies unambiguously a GTP
1949 * flow.". We use the tid for this instead; I cannot find a
1950 * situation in which this doesn't unambiguously identify the
1953 hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
1956 hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
1960 hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
1961 hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);
1963 switch (pctx->gtp_version) {
1965 netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
1966 pctx->u.v0.tid, &pctx->peer.addr,
1967 &pctx->ms.addr, pctx);
1970 netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
1971 pctx->u.v1.i_tei, pctx->u.v1.o_tei,
1972 &pctx->peer.addr, &pctx->ms.addr, pctx);
1979 static void pdp_context_free(struct rcu_head *head)
1981 struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);
1987 static void pdp_context_delete(struct pdp_ctx *pctx)
1989 hlist_del_rcu(&pctx->hlist_tid);
1990 hlist_del_rcu(&pctx->hlist_addr);
1991 call_rcu(&pctx->rcu_head, pdp_context_free);
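/* The context is unlinked from both hash chains first and only freed after
 * an RCU grace period, so lockless readers walking the chains with
 * hlist_for_each_entry_rcu() never dereference freed memory.
 */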
1994 static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation);
1996 static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
1998 unsigned int version;
1999 struct pdp_ctx *pctx;
2000 struct gtp_dev *gtp;
2004 if (!info->attrs[GTPA_VERSION] ||
2005 !info->attrs[GTPA_LINK])
2008 version = nla_get_u32(info->attrs[GTPA_VERSION]);
2012 if (!info->attrs[GTPA_TID] ||
2013 !info->attrs[GTPA_FLOW])
2017 if (!info->attrs[GTPA_I_TEI] ||
2018 !info->attrs[GTPA_O_TEI])
2028 gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
2034 if (version == GTP_V0)
2036 else if (version == GTP_V1)
2046 pctx = gtp_pdp_add(gtp, sk, info);
2048 err = PTR_ERR(pctx);
2050 gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL);
2059 static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
2060 struct nlattr *nla[])
2062 struct gtp_dev *gtp;
2065 if (nla[GTPA_FAMILY])
2066 family = nla_get_u8(nla[GTPA_FAMILY]);
2070 gtp = gtp_find_dev(net, nla);
2072 return ERR_PTR(-ENODEV);
2074 if (nla[GTPA_MS_ADDRESS]) {
2075 __be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);
2077 if (family != AF_INET)
2078 return ERR_PTR(-EINVAL);
2080 return ipv4_pdp_find(gtp, ip);
2081 } else if (nla[GTPA_MS_ADDR6]) {
2082 struct in6_addr addr = nla_get_in6_addr(nla[GTPA_MS_ADDR6]);
2084 if (family != AF_INET6)
2085 return ERR_PTR(-EINVAL);
2087 if (addr.s6_addr32[2] ||
2089 return ERR_PTR(-EADDRNOTAVAIL);
2091 return ipv6_pdp_find(gtp, &addr);
2092 } else if (nla[GTPA_VERSION]) {
2093 u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);
2095 if (gtp_version == GTP_V0 && nla[GTPA_TID]) {
2096 return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]),
2098 } else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI]) {
2099 return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]),
2104 return ERR_PTR(-EINVAL);
2107 static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[])
2109 struct pdp_ctx *pctx;
2112 pctx = gtp_find_pdp_by_link(net, nla);
2114 pctx = ERR_PTR(-EINVAL);
2117 pctx = ERR_PTR(-ENOENT);
2122 static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
2124 struct pdp_ctx *pctx;
2127 if (!info->attrs[GTPA_VERSION])
2132 pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
2134 err = PTR_ERR(pctx);
2138 if (pctx->gtp_version == GTP_V0)
2139 netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
2140 pctx->u.v0.tid, pctx);
2141 else if (pctx->gtp_version == GTP_V1)
2142 netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
2143 pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
2145 gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC);
2146 pdp_context_delete(pctx);
2153 static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
2154 int flags, u32 type, struct pdp_ctx *pctx)
2158 genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
2163 if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
2164 nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
2165 nla_put_u8(skb, GTPA_FAMILY, pctx->af))
2166 goto nla_put_failure;
2170 if (nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms.addr.s_addr))
2171 goto nla_put_failure;
2174 if (nla_put_in6_addr(skb, GTPA_MS_ADDR6, &pctx->ms.addr6))
2175 goto nla_put_failure;
2179 switch (pctx->sk->sk_family) {
2181 if (nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer.addr.s_addr))
2182 goto nla_put_failure;
2185 if (nla_put_in6_addr(skb, GTPA_PEER_ADDR6, &pctx->peer.addr6))
2186 goto nla_put_failure;
2190 switch (pctx->gtp_version) {
2192 if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
2193 nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
2194 goto nla_put_failure;
2197 if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
2198 nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
2199 goto nla_put_failure;
2202 genlmsg_end(skb, genlh);
2207 genlmsg_cancel(skb, genlh);
2211 static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation)
2213 struct sk_buff *msg;
2216 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation);
2220 ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx);
2226 ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg,
2227 0, GTP_GENL_MCGRP, GFP_ATOMIC);
2231 static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
2233 struct pdp_ctx *pctx = NULL;
2234 struct sk_buff *skb2;
2237 if (!info->attrs[GTPA_VERSION])
2242 pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
2244 err = PTR_ERR(pctx);
2248 skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
2254 err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
2255 0, info->nlhdr->nlmsg_type, pctx);
2257 goto err_unlock_free;
2260 return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);
2269 static int gtp_genl_dump_pdp(struct sk_buff *skb,
2270 struct netlink_callback *cb)
2272 struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
2273 int i, j, bucket = cb->args[0], skip = cb->args[1];
2274 struct net *net = sock_net(skb->sk);
2275 struct pdp_ctx *pctx;
2278 gn = net_generic(net, gtp_net_id);
2284 list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
2285 if (last_gtp && last_gtp != gtp)
2290 for (i = bucket; i < gtp->hash_size; i++) {
2292 hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
2295 gtp_genl_fill_info(skb,
2296 NETLINK_CB(cb->skb).portid,
2299 cb->nlh->nlmsg_type, pctx)) {
2302 cb->args[2] = (unsigned long)gtp;
2317 static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
2319 struct sk_buff *skb_to_send;
2320 __be32 src_ip, dst_ip;
2321 unsigned int version;
2322 struct gtp_dev *gtp;
2329 if (!info->attrs[GTPA_VERSION] ||
2330 !info->attrs[GTPA_LINK] ||
2331 !info->attrs[GTPA_PEER_ADDRESS] ||
2332 !info->attrs[GTPA_MS_ADDRESS])
2335 version = nla_get_u32(info->attrs[GTPA_VERSION]);
2336 dst_ip = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
2337 src_ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
2339 gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
2343 if (!gtp->sk_created)
2345 if (!(gtp->dev->flags & IFF_UP))
2348 if (version == GTP_V0) {
2349 struct gtp0_header *gtp0_h;
2351 len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp0_header) +
2352 sizeof(struct iphdr) + sizeof(struct udphdr);
2354 skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
2359 port = htons(GTP0_PORT);
2361 gtp0_h = skb_push(skb_to_send, sizeof(struct gtp0_header));
2362 memset(gtp0_h, 0, sizeof(struct gtp0_header));
2363 gtp0_build_echo_msg(gtp0_h, GTP_ECHO_REQ);
2364 } else if (version == GTP_V1) {
2365 struct gtp1_header_long *gtp1u_h;
2367 len = LL_RESERVED_SPACE(gtp->dev) +
2368 sizeof(struct gtp1_header_long) +
2369 sizeof(struct iphdr) + sizeof(struct udphdr);
2371 skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
2376 port = htons(GTP1U_PORT);
2378 gtp1u_h = skb_push(skb_to_send,
2379 sizeof(struct gtp1_header_long));
2380 memset(gtp1u_h, 0, sizeof(struct gtp1_header_long));
2381 gtp1u_build_echo_msg(gtp1u_h, GTP_ECHO_REQ);
2386 rt = ip4_route_output_gtp(&fl4, sk, dst_ip, src_ip);
2388 netdev_dbg(gtp->dev, "no route for echo request to %pI4\n",
2390 kfree_skb(skb_to_send);
2394 udp_tunnel_xmit_skb(rt, sk, skb_to_send,
2395 fl4.saddr, fl4.daddr,
2397 ip4_dst_hoplimit(&rt->dst),
2400 !net_eq(sock_net(sk),
2406 static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
2407 [GTPA_LINK] = { .type = NLA_U32, },
2408 [GTPA_VERSION] = { .type = NLA_U32, },
2409 [GTPA_TID] = { .type = NLA_U64, },
2410 [GTPA_PEER_ADDRESS] = { .type = NLA_U32, },
2411 [GTPA_MS_ADDRESS] = { .type = NLA_U32, },
2412 [GTPA_FLOW] = { .type = NLA_U16, },
2413 [GTPA_NET_NS_FD] = { .type = NLA_U32, },
2414 [GTPA_I_TEI] = { .type = NLA_U32, },
2415 [GTPA_O_TEI] = { .type = NLA_U32, },
2416 [GTPA_PEER_ADDR6] = { .len = sizeof(struct in6_addr), },
2417 [GTPA_MS_ADDR6] = { .len = sizeof(struct in6_addr), },
2418 [GTPA_FAMILY] = { .type = NLA_U8, },
2421 static const struct genl_small_ops gtp_genl_ops[] = {
2423 .cmd = GTP_CMD_NEWPDP,
2424 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2425 .doit = gtp_genl_new_pdp,
2426 .flags = GENL_ADMIN_PERM,
2429 .cmd = GTP_CMD_DELPDP,
2430 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2431 .doit = gtp_genl_del_pdp,
2432 .flags = GENL_ADMIN_PERM,
2435 .cmd = GTP_CMD_GETPDP,
2436 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2437 .doit = gtp_genl_get_pdp,
2438 .dumpit = gtp_genl_dump_pdp,
2439 .flags = GENL_ADMIN_PERM,
2442 .cmd = GTP_CMD_ECHOREQ,
2443 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2444 .doit = gtp_genl_send_echo_req,
2445 .flags = GENL_ADMIN_PERM,
2449 static struct genl_family gtp_genl_family __ro_after_init = {
2453 .maxattr = GTPA_MAX,
2454 .policy = gtp_genl_policy,
2456 .module = THIS_MODULE,
2457 .small_ops = gtp_genl_ops,
2458 .n_small_ops = ARRAY_SIZE(gtp_genl_ops),
2459 .resv_start_op = GTP_CMD_ECHOREQ + 1,
2460 .mcgrps = gtp_genl_mcgrps,
2461 .n_mcgrps = ARRAY_SIZE(gtp_genl_mcgrps),
2464 static int __net_init gtp_net_init(struct net *net)
2466 struct gtp_net *gn = net_generic(net, gtp_net_id);
2468 INIT_LIST_HEAD(&gn->gtp_dev_list);
2472 static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
2473 struct list_head *dev_to_kill)
2477 list_for_each_entry(net, net_list, exit_list) {
2478 struct gtp_net *gn = net_generic(net, gtp_net_id);
2479 struct gtp_dev *gtp;
2481 list_for_each_entry(gtp, &gn->gtp_dev_list, list)
2482 gtp_dellink(gtp->dev, dev_to_kill);
2486 static struct pernet_operations gtp_net_ops = {
2487 .init = gtp_net_init,
2488 .exit_batch_rtnl = gtp_net_exit_batch_rtnl,
2490 .size = sizeof(struct gtp_net),
2493 static int __init gtp_init(void)
2497 get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
2499 err = register_pernet_subsys(&gtp_net_ops);
2503 err = rtnl_link_register(&gtp_link_ops);
2505 goto unreg_pernet_subsys;
2507 err = genl_register_family(&gtp_genl_family);
2509 goto unreg_rtnl_link;
2511 pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
2512 sizeof(struct pdp_ctx));
2516 rtnl_link_unregister(&gtp_link_ops);
2517 unreg_pernet_subsys:
2518 unregister_pernet_subsys(&gtp_net_ops);
2520 pr_err("error loading GTP module\n");
2523 late_initcall(gtp_init);
2525 static void __exit gtp_fini(void)
2527 genl_unregister_family(&gtp_genl_family);
2528 rtnl_link_unregister(&gtp_link_ops);
2529 unregister_pernet_subsys(&gtp_net_ops);
2531 pr_info("GTP module unloaded\n");
2533 module_exit(gtp_fini);
2535 MODULE_LICENSE("GPL");
2537 MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
2538 MODULE_ALIAS_RTNL_LINK("gtp");
2539 MODULE_ALIAS_GENL_FAMILY("gtp");