1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* GTP according to GSM TS 09.60 / 3GPP TS 29.060
4 * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/module.h>
15 #include <linux/skbuff.h>
16 #include <linux/udp.h>
17 #include <linux/rculist.h>
18 #include <linux/jhash.h>
19 #include <linux/if_tunnel.h>
20 #include <linux/net.h>
21 #include <linux/file.h>
22 #include <linux/gtp.h>
24 #include <net/net_namespace.h>
25 #include <net/protocol.h>
26 #include <net/inet_dscp.h>
27 #include <net/inet_sock.h>
31 #include <net/udp_tunnel.h>
34 #include <net/genetlink.h>
35 #include <net/netns/generic.h>
38 /* An active session for the subscriber. */
40 struct hlist_node hlist_tid;
41 struct hlist_node hlist_addr;
58 struct in6_addr addr6;
62 struct in6_addr addr6;
66 struct net_device *dev;
69 struct rcu_head rcu_head;
72 /* One instance of the GTP device. */
74 struct list_head list;
80 struct net_device *dev;
84 unsigned int hash_size;
85 struct hlist_head *tid_hash;
86 struct hlist_head *addr_hash;
103 static unsigned int gtp_net_id __read_mostly;
106 struct list_head gtp_dev_list;
109 static u32 gtp_h_initval;
111 static struct genl_family gtp_genl_family;
113 enum gtp_multicast_groups {
117 static const struct genl_multicast_group gtp_genl_mcgrps[] = {
118 [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
121 static void pdp_context_delete(struct pdp_ctx *pctx);
123 static inline u32 gtp0_hashfn(u64 tid)
125 u32 *tid32 = (u32 *) &tid;
126 return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
129 static inline u32 gtp1u_hashfn(u32 tid)
131 return jhash_1word(tid, gtp_h_initval);
134 static inline u32 ipv4_hashfn(__be32 ip)
136 return jhash_1word((__force u32)ip, gtp_h_initval);
139 static u32 ipv6_hashfn(const struct in6_addr *ip6)
141 return jhash_2words((__force u32)ip6->s6_addr32[0],
142 (__force u32)ip6->s6_addr32[1], gtp_h_initval);
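/* A rough sketch of how these helpers are used for a lookup; hash_size is
 * the bucket count configured via IFLA_GTP_PDP_HASHSIZE:
 *
 *   head = &gtp->tid_hash[gtp1u_hashfn(tei) % gtp->hash_size];
 *
 * All of the hash functions above mix in gtp_h_initval, which gtp_init()
 * fills with random bytes, so bucket placement differs from boot to boot.
 */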
145 /* Resolve a PDP context structure based on the 64bit TID. */
146 static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid, u16 family)
148 struct hlist_head *head;
151 head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];
153 hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
154 if (pdp->af == family &&
155 pdp->gtp_version == GTP_V0 &&
156 pdp->u.v0.tid == tid)
162 /* Resolve a PDP context structure based on the 32bit TEI. */
163 static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid, u16 family)
165 struct hlist_head *head;
168 head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];
170 hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
171 if (pdp->af == family &&
172 pdp->gtp_version == GTP_V1 &&
173 pdp->u.v1.i_tei == tid)
179 /* Resolve a PDP context based on IPv4 address of MS. */
180 static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
182 struct hlist_head *head;
185 head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];
187 hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
188 if (pdp->af == AF_INET &&
189 pdp->ms.addr.s_addr == ms_addr)
196 /* 3GPP TS 29.060: PDN Connection: the association between a MS represented by
197 * [...] one IPv6 *prefix* and a PDN represented by an APN.
199 * Then, 3GPP TS 29.061, Section 11.2.1.3 says: The size of the prefix shall be
200 * according to the maximum prefix length for a global IPv6 address as
201 * specified in the IPv6 Addressing Architecture, see RFC 4291.
203 * Finally, RFC 4291 section 2.5.4 states: All Global Unicast addresses other
204 * than those that start with binary 000 have a 64-bit interface ID field
205 * (i.e., n + m = 64).
207 static bool ipv6_pdp_addr_equal(const struct in6_addr *a,
208 const struct in6_addr *b)
210 return a->s6_addr32[0] == b->s6_addr32[0] &&
211 a->s6_addr32[1] == b->s6_addr32[1];
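/* Only the upper 64 bits (the prefix) take part in the comparison above.
 * For illustration, 2001:db8:1:2::1 and 2001:db8:1:2::ffff both match a
 * PDP context whose ms.addr6 carries the 2001:db8:1:2::/64 prefix.
 */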
214 static struct pdp_ctx *ipv6_pdp_find(struct gtp_dev *gtp,
215 const struct in6_addr *ms_addr)
217 struct hlist_head *head;
220 head = &gtp->addr_hash[ipv6_hashfn(ms_addr) % gtp->hash_size];
222 hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
223 if (pdp->af == AF_INET6 &&
224 ipv6_pdp_addr_equal(&pdp->ms.addr6, ms_addr))
231 static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
232 unsigned int hdrlen, unsigned int role)
236 if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
239 iph = (struct iphdr *)(skb->data + hdrlen);
241 if (role == GTP_ROLE_SGSN)
242 return iph->daddr == pctx->ms.addr.s_addr;
244 return iph->saddr == pctx->ms.addr.s_addr;
247 static bool gtp_check_ms_ipv6(struct sk_buff *skb, struct pdp_ctx *pctx,
248 unsigned int hdrlen, unsigned int role)
250 struct ipv6hdr *ip6h;
253 if (!pskb_may_pull(skb, hdrlen + sizeof(struct ipv6hdr)))
256 ip6h = (struct ipv6hdr *)(skb->data + hdrlen);
258 if ((ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL) ||
259 (ipv6_addr_type(&ip6h->daddr) & IPV6_ADDR_LINKLOCAL))
262 if (role == GTP_ROLE_SGSN) {
263 ret = ipv6_pdp_addr_equal(&ip6h->daddr, &pctx->ms.addr6);
265 ret = ipv6_pdp_addr_equal(&ip6h->saddr, &pctx->ms.addr6);
271 /* Check if the inner IP address in this packet is assigned to any
272 * existing mobile subscriber.
274 static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
275 unsigned int hdrlen, unsigned int role,
278 switch (inner_proto) {
280 return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
282 return gtp_check_ms_ipv6(skb, pctx, hdrlen, role);
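/* A note on the role parameter used above: in the default GGSN role the
 * inner source address must belong to the subscriber (traffic coming from
 * the MS), while in the SGSN role the inner destination address is checked
 * instead (traffic heading towards the MS), mirroring the saddr/daddr split
 * in gtp_check_ms_ipv4() and gtp_check_ms_ipv6().
 */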
287 static int gtp_inner_proto(struct sk_buff *skb, unsigned int hdrlen,
290 __u8 *ip_version, _ip_version;
292 ip_version = skb_header_pointer(skb, hdrlen, sizeof(*ip_version),
297 switch (*ip_version & 0xf0) {
299 *inner_proto = ETH_P_IP;
302 *inner_proto = ETH_P_IPV6;
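/* The switch above keys on the IP version nibble of the first payload
 * octet: 0x4x marks an IPv4 header, 0x6x an IPv6 header; anything else
 * makes the function fail and the callers drop the packet as not
 * encapsulating an IP packet.
 */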
311 static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
312 unsigned int hdrlen, unsigned int role, __u16 inner_proto)
314 if (!gtp_check_ms(skb, pctx, hdrlen, role, inner_proto)) {
315 netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
319 /* Get rid of the GTP + UDP headers. */
320 if (iptunnel_pull_header(skb, hdrlen, htons(inner_proto),
321 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) {
322 pctx->dev->stats.rx_length_errors++;
326 netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");
328 /* Now that the UDP and the GTP header have been removed, set up the
329 * new network header. This is required by the upper layer to
330 * calculate the transport header.
332 skb_reset_network_header(skb);
333 skb_reset_mac_header(skb);
335 skb->dev = pctx->dev;
337 dev_sw_netstats_rx_add(pctx->dev, skb->len);
343 pctx->dev->stats.rx_dropped++;
347 static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
348 const struct sock *sk,
349 __be32 daddr, __be32 saddr)
351 memset(fl4, 0, sizeof(*fl4));
352 fl4->flowi4_oif = sk->sk_bound_dev_if;
355 fl4->flowi4_tos = inet_dscp_to_dsfield(inet_sk_dscp(inet_sk(sk)));
356 fl4->flowi4_scope = ip_sock_rt_scope(sk);
357 fl4->flowi4_proto = sk->sk_protocol;
359 return ip_route_output_key(sock_net(sk), fl4);
362 static struct rt6_info *ip6_route_output_gtp(struct net *net,
364 const struct sock *sk,
365 const struct in6_addr *daddr,
366 struct in6_addr *saddr)
368 struct dst_entry *dst;
370 memset(fl6, 0, sizeof(*fl6));
371 fl6->flowi6_oif = sk->sk_bound_dev_if;
374 fl6->flowi6_proto = sk->sk_protocol;
376 dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, fl6, NULL);
378 return ERR_PTR(-ENETUNREACH);
380 return (struct rt6_info *)dst;
384 * In all Path Management messages:
385 * - TID: is not used and shall be set to 0.
386 * - Flow Label is not used and shall be set to 0
387 * In signalling messages:
388 * - number: this field is not yet used in signalling messages.
389 * It shall be set to 255 by the sender and shall be ignored
391 * Returns true if the echo req was correct, false otherwise.
393 static bool gtp0_validate_echo_hdr(struct gtp0_header *gtp0)
395 return !(gtp0->tid || (gtp0->flags ^ 0x1e) ||
396 gtp0->number != 0xff || gtp0->flow);
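/* For reference, the expected flags value 0x1e decodes as version 0 (top
 * three bits), PT=1 (GTP, not GTP'), spare bits 111 and SNN=0. The check
 * above therefore accepts only Echo headers with exactly those flags, TID
 * and Flow Label set to 0, and the number field set to 0xff, as GSM
 * TS 09.60 section 7.3 requires.
 */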
399 /* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
400 static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type)
402 int len_pkt, len_hdr;
404 hdr->flags = 0x1e; /* v0, GTP-non-prime. */
405 hdr->type = msg_type;
406 /* GSM TS 09.60, section 7.3: in all Path Management messages the Flow
407 * Label and TID are not used and shall be set to 0.
412 hdr->spare[0] = 0xff;
413 hdr->spare[1] = 0xff;
414 hdr->spare[2] = 0xff;
416 len_pkt = sizeof(struct gtp0_packet);
417 len_hdr = sizeof(struct gtp0_header);
419 if (msg_type == GTP_ECHO_RSP)
420 hdr->length = htons(len_pkt - len_hdr);
425 static int gtp0_send_echo_resp_ip(struct gtp_dev *gtp, struct sk_buff *skb)
427 struct iphdr *iph = ip_hdr(skb);
431 /* find route to the sender,
432 * src address becomes dst address and vice versa.
434 rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr);
436 netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
441 udp_tunnel_xmit_skb(rt, gtp->sk0, skb,
442 fl4.saddr, fl4.daddr,
444 ip4_dst_hoplimit(&rt->dst),
446 htons(GTP0_PORT), htons(GTP0_PORT),
447 !net_eq(sock_net(gtp->sk0),
454 static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
456 struct gtp0_packet *gtp_pkt;
457 struct gtp0_header *gtp0;
460 gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
462 if (!gtp0_validate_echo_hdr(gtp0))
467 /* pull GTP and UDP headers */
468 skb_pull_data(skb, sizeof(struct gtp0_header) + sizeof(struct udphdr));
470 gtp_pkt = skb_push(skb, sizeof(struct gtp0_packet));
471 memset(gtp_pkt, 0, sizeof(struct gtp0_packet));
473 gtp0_build_echo_msg(&gtp_pkt->gtp0_h, GTP_ECHO_RSP);
475 /* GSM TS 09.60. 7.3 The Sequence Number in a signalling response
476 * message shall be copied from the signalling request message
477 * that the GSN is replying to.
479 gtp_pkt->gtp0_h.seq = seq;
481 gtp_pkt->ie.tag = GTPIE_RECOVERY;
482 gtp_pkt->ie.val = gtp->restart_count;
484 switch (gtp->sk0->sk_family) {
486 if (gtp0_send_echo_resp_ip(gtp, skb) < 0)
496 static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
497 int flags, u32 type, struct echo_info echo)
501 genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
506 if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) ||
507 nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer.addr.s_addr) ||
508 nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms.addr.s_addr))
511 genlmsg_end(skb, genlh);
515 genlmsg_cancel(skb, genlh);
519 static void gtp0_handle_echo_resp_ip(struct sk_buff *skb, struct echo_info *echo)
521 struct iphdr *iph = ip_hdr(skb);
523 echo->ms.addr.s_addr = iph->daddr;
524 echo->peer.addr.s_addr = iph->saddr;
525 echo->gtp_version = GTP_V0;
528 static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
530 struct gtp0_header *gtp0;
531 struct echo_info echo;
535 gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
537 if (!gtp0_validate_echo_hdr(gtp0))
540 switch (gtp->sk0->sk_family) {
542 gtp0_handle_echo_resp_ip(skb, &echo);
548 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
552 ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
558 return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
559 msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
562 static int gtp_proto_to_family(__u16 proto)
577 /* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
578 static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
580 unsigned int hdrlen = sizeof(struct udphdr) +
581 sizeof(struct gtp0_header);
582 struct gtp0_header *gtp0;
583 struct pdp_ctx *pctx;
586 if (!pskb_may_pull(skb, hdrlen))
589 gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
591 if ((gtp0->flags >> 5) != GTP_V0)
594 /* If the sockets were created in the kernel, it means that
595 * there is no daemon running in userspace which would
596 * handle the echo request.
598 if (gtp0->type == GTP_ECHO_REQ && gtp->sk_created)
599 return gtp0_send_echo_resp(gtp, skb);
601 if (gtp0->type == GTP_ECHO_RSP && gtp->sk_created)
602 return gtp0_handle_echo_resp(gtp, skb);
604 if (gtp0->type != GTP_TPDU)
607 if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) {
608 netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n");
612 pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid),
613 gtp_proto_to_family(inner_proto));
615 netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
619 return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto);
622 /* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
623 static void gtp1u_build_echo_msg(struct gtp1_header_long *hdr, __u8 msg_type)
625 int len_pkt, len_hdr;
627 /* S flag must be set to 1 */
628 hdr->flags = 0x32; /* v1, GTP-non-prime. */
629 hdr->type = msg_type;
630 /* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */
633 /* seq, npdu and next count towards the length of the GTP packet,
634 * which is why the size of gtp1_header is subtracted here,
635 * not the size of gtp1_header_long.
638 len_hdr = sizeof(struct gtp1_header);
640 if (msg_type == GTP_ECHO_RSP) {
641 len_pkt = sizeof(struct gtp1u_packet);
642 hdr->length = htons(len_pkt - len_hdr);
644 /* GTP_ECHO_REQ does not carry a GTP Information Element,
645 * which is why gtp1_header_long is used here.
647 len_pkt = sizeof(struct gtp1_header_long);
648 hdr->length = htons(len_pkt - len_hdr);
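/* For reference, the 0x32 flags value set above decodes as version 1,
 * PT=1, E=0, S=1, PN=0: the sequence number field is present (S=1), as
 * TS 29.281 requires for Echo messages, while the extension header and
 * N-PDU number fields are absent.
 */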
652 static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
654 struct gtp1_header_long *gtp1u;
655 struct gtp1u_packet *gtp_pkt;
660 gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));
662 /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
663 * Error Indication and Supported Extension Headers Notification
664 * messages, the S flag shall be set to 1 and TEID shall be set to 0.
666 if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
669 /* pull GTP and UDP headers */
671 sizeof(struct gtp1_header_long) + sizeof(struct udphdr));
673 gtp_pkt = skb_push(skb, sizeof(struct gtp1u_packet));
674 memset(gtp_pkt, 0, sizeof(struct gtp1u_packet));
676 gtp1u_build_echo_msg(&gtp_pkt->gtp1u_h, GTP_ECHO_RSP);
678 /* 3GPP TS 29.281 7.7.2 - The Restart Counter value in the
679 * Recovery information element shall not be used, i.e. it shall
680 * be set to zero by the sender and shall be ignored by the receiver.
681 * The Recovery information element is mandatory due to backwards
682 * compatibility reasons.
684 gtp_pkt->ie.tag = GTPIE_RECOVERY;
689 /* find route to the sender,
690 * src address becomes dst address and vice versa.
692 rt = ip4_route_output_gtp(&fl4, gtp->sk1u, iph->saddr, iph->daddr);
694 netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
699 udp_tunnel_xmit_skb(rt, gtp->sk1u, skb,
700 fl4.saddr, fl4.daddr,
702 ip4_dst_hoplimit(&rt->dst),
704 htons(GTP1U_PORT), htons(GTP1U_PORT),
705 !net_eq(sock_net(gtp->sk1u),
711 static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
713 struct gtp1_header_long *gtp1u;
714 struct echo_info echo;
719 gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));
721 /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
722 * Error Indication and Supported Extension Headers Notification
723 * messages, the S flag shall be set to 1 and TEID shall be set to 0.
725 if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
729 echo.ms.addr.s_addr = iph->daddr;
730 echo.peer.addr.s_addr = iph->saddr;
731 echo.gtp_version = GTP_V1;
733 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
737 ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
743 return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
744 msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
747 static int gtp_parse_exthdrs(struct sk_buff *skb, unsigned int *hdrlen)
749 struct gtp_ext_hdr *gtp_exthdr, _gtp_exthdr;
750 unsigned int offset = *hdrlen;
751 __u8 *next_type, _next_type;
753 /* From 29.060: "The Extension Header Length field specifies the length
754 * of the particular Extension header in 4 octets units."
756 * This length field includes length field size itself (1 byte),
757 * payload (variable length) and next type (1 byte). The extension
758 * header is aligned to 4 bytes.
762 gtp_exthdr = skb_header_pointer(skb, offset, sizeof(*gtp_exthdr),
764 if (!gtp_exthdr || !gtp_exthdr->len)
767 offset += gtp_exthdr->len * 4;
769 /* From 29.060: "If no such Header follows, then the value of
770 * the Next Extension Header Type shall be 0."
772 next_type = skb_header_pointer(skb, offset - 1,
773 sizeof(_next_type), &_next_type);
777 } while (*next_type != 0);
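/* A worked example of the walk above, assuming a single minimal extension
 * header whose length byte is 1 (i.e. 4 octets in total):
 *
 *   +-------+---------+---------+-----------+
 *   | len=1 | payload | payload | next=0x00 |
 *   +-------+---------+---------+-----------+
 *
 * offset advances by len * 4, the byte just before the new offset holds the
 * Next Extension Header Type, and a value of 0 there terminates the loop.
 */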
784 static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
786 unsigned int hdrlen = sizeof(struct udphdr) +
787 sizeof(struct gtp1_header);
788 struct gtp1_header *gtp1;
789 struct pdp_ctx *pctx;
792 if (!pskb_may_pull(skb, hdrlen))
795 gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
797 if ((gtp1->flags >> 5) != GTP_V1)
800 /* If the sockets were created in the kernel, it means that
801 * there is no daemon running in userspace which would
802 * handle the echo request.
804 if (gtp1->type == GTP_ECHO_REQ && gtp->sk_created)
805 return gtp1u_send_echo_resp(gtp, skb);
807 if (gtp1->type == GTP_ECHO_RSP && gtp->sk_created)
808 return gtp1u_handle_echo_resp(gtp, skb);
810 if (gtp1->type != GTP_TPDU)
813 /* From 29.060: "This field shall be present if and only if any one or
814 * more of the S, PN and E flags are set."
816 * If any of these bits is set, then the remaining ones also have to be
819 if (gtp1->flags & GTP1_F_MASK)
822 /* Make sure the header is large enough, including extensions. */
823 if (!pskb_may_pull(skb, hdrlen))
826 if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) {
827 netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n");
831 gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
833 pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid),
834 gtp_proto_to_family(inner_proto));
836 netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
840 if (gtp1->flags & GTP1_F_EXTHDR &&
841 gtp_parse_exthdrs(skb, &hdrlen) < 0)
844 return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto);
847 static void __gtp_encap_destroy(struct sock *sk)
852 gtp = sk->sk_user_data;
858 WRITE_ONCE(udp_sk(sk)->encap_type, 0);
859 rcu_assign_sk_user_data(sk, NULL);
867 static void gtp_encap_destroy(struct sock *sk)
870 __gtp_encap_destroy(sk);
874 static void gtp_encap_disable_sock(struct sock *sk)
879 __gtp_encap_destroy(sk);
882 static void gtp_encap_disable(struct gtp_dev *gtp)
884 if (gtp->sk_created) {
885 udp_tunnel_sock_release(gtp->sk0->sk_socket);
886 udp_tunnel_sock_release(gtp->sk1u->sk_socket);
887 gtp->sk_created = false;
891 gtp_encap_disable_sock(gtp->sk0);
892 gtp_encap_disable_sock(gtp->sk1u);
896 /* UDP encapsulation receive handler. See net/ipv4/udp.c.
897 * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
899 static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
904 gtp = rcu_dereference_sk_user_data(sk);
908 netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
910 switch (READ_ONCE(udp_sk(sk)->encap_type)) {
912 netdev_dbg(gtp->dev, "received GTP0 packet\n");
913 ret = gtp0_udp_encap_recv(gtp, skb);
915 case UDP_ENCAP_GTP1U:
916 netdev_dbg(gtp->dev, "received GTP1U packet\n");
917 ret = gtp1u_udp_encap_recv(gtp, skb);
920 ret = -1; /* Shouldn't happen. */
925 netdev_dbg(gtp->dev, "pass up to the process\n");
930 netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
939 static void gtp_dev_uninit(struct net_device *dev)
941 struct gtp_dev *gtp = netdev_priv(dev);
943 gtp_encap_disable(gtp);
946 static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
948 int payload_len = skb->len;
949 struct gtp0_header *gtp0;
951 gtp0 = skb_push(skb, sizeof(*gtp0));
953 gtp0->flags = 0x1e; /* v0, GTP-non-prime. */
954 gtp0->type = GTP_TPDU;
955 gtp0->length = htons(payload_len);
956 gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
957 gtp0->flow = htons(pctx->u.v0.flow);
959 gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0xff;
960 gtp0->tid = cpu_to_be64(pctx->u.v0.tid);
963 static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
965 int payload_len = skb->len;
966 struct gtp1_header *gtp1;
968 gtp1 = skb_push(skb, sizeof(*gtp1));
970 /* Bits 8 7 6 5 4 3 2 1
971 * +--+--+--+--+--+--+--+--+
972 * |version |PT| 0| E| S|PN|
973 * +--+--+--+--+--+--+--+--+
976 gtp1->flags = 0x30; /* v1, GTP-non-prime. */
977 gtp1->type = GTP_TPDU;
978 gtp1->length = htons(payload_len);
979 gtp1->tid = htonl(pctx->u.v1.o_tei);
981 /* TODO: Support for extension header, sequence number and N-PDU.
982 * Update the length field if any of them is available.
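/* For reference, the 0x30 flags value set in gtp1_push_header() decodes as
 * version 1, PT=1 and E=S=PN=0, i.e. the minimal 8-byte GTPv1-U header with
 * none of the optional fields, matching the bit diagram above.
 */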
994 struct rt6_info *rt6;
996 struct pdp_ctx *pctx;
997 struct net_device *dev;
1002 static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
1004 switch (pktinfo->pctx->gtp_version) {
1006 pktinfo->gtph_port = htons(GTP0_PORT);
1007 gtp0_push_header(skb, pktinfo->pctx);
1010 pktinfo->gtph_port = htons(GTP1U_PORT);
1011 gtp1_push_header(skb, pktinfo->pctx);
1016 static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
1017 struct sock *sk, __u8 tos,
1018 struct pdp_ctx *pctx, struct rtable *rt,
1020 struct net_device *dev)
1024 pktinfo->pctx = pctx;
1026 pktinfo->fl4 = *fl4;
1030 static void gtp_set_pktinfo_ipv6(struct gtp_pktinfo *pktinfo,
1031 struct sock *sk, __u8 tos,
1032 struct pdp_ctx *pctx, struct rt6_info *rt6,
1034 struct net_device *dev)
1038 pktinfo->pctx = pctx;
1040 pktinfo->fl6 = *fl6;
1044 static int gtp_build_skb_outer_ip4(struct sk_buff *skb, struct net_device *dev,
1045 struct gtp_pktinfo *pktinfo,
1046 struct pdp_ctx *pctx, __u8 tos,
1054 rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer.addr.s_addr,
1055 inet_sk(pctx->sk)->inet_saddr);
1057 netdev_dbg(dev, "no route to SSGN %pI4\n",
1058 &pctx->peer.addr.s_addr);
1059 dev->stats.tx_carrier_errors++;
1063 if (rt->dst.dev == dev) {
1064 netdev_dbg(dev, "circular route to SSGN %pI4\n",
1065 &pctx->peer.addr.s_addr);
1066 dev->stats.collisions++;
1070 /* This is similar to tnl_update_pmtu(). */
1073 mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
1074 sizeof(struct iphdr) - sizeof(struct udphdr);
1075 switch (pctx->gtp_version) {
1077 mtu -= sizeof(struct gtp0_header);
1080 mtu -= sizeof(struct gtp1_header);
1084 mtu = dst_mtu(&rt->dst);
1087 skb_dst_update_pmtu_no_confirm(skb, mtu);
1089 if (frag_off & htons(IP_DF) &&
1090 ((!skb_is_gso(skb) && skb->len > mtu) ||
1091 (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
1092 netdev_dbg(dev, "packet too big, fragmentation needed\n");
1093 icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
1098 gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, tos, pctx, rt, &fl4, dev);
1099 gtp_push_header(skb, pktinfo);
1108 static int gtp_build_skb_outer_ip6(struct net *net, struct sk_buff *skb,
1109 struct net_device *dev,
1110 struct gtp_pktinfo *pktinfo,
1111 struct pdp_ctx *pctx, __u8 tos)
1113 struct dst_entry *dst;
1114 struct rt6_info *rt;
1118 rt = ip6_route_output_gtp(net, &fl6, pctx->sk, &pctx->peer.addr6,
1119 &inet6_sk(pctx->sk)->saddr);
1121 netdev_dbg(dev, "no route to SSGN %pI6\n",
1123 dev->stats.tx_carrier_errors++;
1128 if (rt->dst.dev == dev) {
1129 netdev_dbg(dev, "circular route to SSGN %pI6\n",
1131 dev->stats.collisions++;
1135 mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
1136 sizeof(struct ipv6hdr) - sizeof(struct udphdr);
1137 switch (pctx->gtp_version) {
1139 mtu -= sizeof(struct gtp0_header);
1142 mtu -= sizeof(struct gtp1_header);
1146 skb_dst_update_pmtu_no_confirm(skb, mtu);
1148 if ((!skb_is_gso(skb) && skb->len > mtu) ||
1149 (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))) {
1150 netdev_dbg(dev, "packet too big, fragmentation needed\n");
1151 icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1155 gtp_set_pktinfo_ipv6(pktinfo, pctx->sk, tos, pctx, rt, &fl6, dev);
1156 gtp_push_header(skb, pktinfo);
1165 static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
1166 struct gtp_pktinfo *pktinfo)
1168 struct gtp_dev *gtp = netdev_priv(dev);
1169 struct net *net = gtp->net;
1170 struct pdp_ctx *pctx;
1174 /* Read the IP destination address and resolve the PDP context.
1175 * Prepend PDP header with TEI/TID from PDP ctx.
1178 if (gtp->role == GTP_ROLE_SGSN)
1179 pctx = ipv4_pdp_find(gtp, iph->saddr);
1181 pctx = ipv4_pdp_find(gtp, iph->daddr);
1184 netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
1188 netdev_dbg(dev, "found PDP context %p\n", pctx);
1190 switch (pctx->sk->sk_family) {
1192 ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx,
1193 iph->tos, iph->frag_off);
1196 ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx,
1208 netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n",
1209 &iph->saddr, &iph->daddr);
1214 static int gtp_build_skb_ip6(struct sk_buff *skb, struct net_device *dev,
1215 struct gtp_pktinfo *pktinfo)
1217 struct gtp_dev *gtp = netdev_priv(dev);
1218 struct net *net = gtp->net;
1219 struct pdp_ctx *pctx;
1220 struct ipv6hdr *ip6h;
1224 /* Read the IP destination address and resolve the PDP context.
1225 * Prepend PDP header with TEI/TID from PDP ctx.
1227 ip6h = ipv6_hdr(skb);
1228 if (gtp->role == GTP_ROLE_SGSN)
1229 pctx = ipv6_pdp_find(gtp, &ip6h->saddr);
1231 pctx = ipv6_pdp_find(gtp, &ip6h->daddr);
1234 netdev_dbg(dev, "no PDP ctx found for %pI6, skip\n",
1238 netdev_dbg(dev, "found PDP context %p\n", pctx);
1240 tos = ipv6_get_dsfield(ip6h);
1242 switch (pctx->sk->sk_family) {
1244 ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx, tos, 0);
1247 ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx, tos);
1258 netdev_dbg(dev, "gtp -> IP src: %pI6 dst: %pI6\n",
1259 &ip6h->saddr, &ip6h->daddr);
1264 static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
1266 unsigned int proto = ntohs(skb->protocol);
1267 struct gtp_pktinfo pktinfo;
1270 /* Ensure there is sufficient headroom. */
1271 if (skb_cow_head(skb, dev->needed_headroom))
1274 if (!pskb_inet_may_pull(skb))
1277 skb_reset_inner_headers(skb);
1279 /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
1283 err = gtp_build_skb_ip4(skb, dev, &pktinfo);
1286 err = gtp_build_skb_ip6(skb, dev, &pktinfo);
1297 switch (pktinfo.pctx->sk->sk_family) {
1299 udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
1300 pktinfo.fl4.saddr, pktinfo.fl4.daddr,
1302 ip4_dst_hoplimit(&pktinfo.rt->dst),
1304 pktinfo.gtph_port, pktinfo.gtph_port,
1305 !net_eq(sock_net(pktinfo.pctx->sk),
1310 #if IS_ENABLED(CONFIG_IPV6)
1311 udp_tunnel6_xmit_skb(&pktinfo.rt6->dst, pktinfo.sk, skb, dev,
1312 &pktinfo.fl6.saddr, &pktinfo.fl6.daddr,
1314 ip6_dst_hoplimit(&pktinfo.rt->dst),
1316 pktinfo.gtph_port, pktinfo.gtph_port,
1324 return NETDEV_TX_OK;
1326 dev->stats.tx_errors++;
1328 return NETDEV_TX_OK;
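/* Transmit path in short: gtp_build_skb_ip4()/_ip6() resolve the PDP
 * context from the inner destination address (or the source address in
 * SGSN role), push the matching GTPv0/v1 header, and the switch above hands
 * the result to udp_tunnel_xmit_skb() or udp_tunnel6_xmit_skb() on the
 * per-context socket, using the standard ports 3386 (GTPv0) and 2152
 * (GTPv1-U) carried in pktinfo.gtph_port.
 */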
1331 static const struct net_device_ops gtp_netdev_ops = {
1332 .ndo_uninit = gtp_dev_uninit,
1333 .ndo_start_xmit = gtp_dev_xmit,
1336 static const struct device_type gtp_type = {
1340 #define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header))
1341 #define GTP_IPV4_MAXLEN (sizeof(struct iphdr) + GTP_TH_MAXLEN)
1343 static void gtp_link_setup(struct net_device *dev)
1345 struct gtp_dev *gtp = netdev_priv(dev);
1347 dev->netdev_ops = &gtp_netdev_ops;
1348 dev->needs_free_netdev = true;
1349 SET_NETDEV_DEVTYPE(dev, &gtp_type);
1351 dev->hard_header_len = 0;
1353 dev->mtu = ETH_DATA_LEN - GTP_IPV4_MAXLEN;
1355 /* Zero header length. */
1356 dev->type = ARPHRD_NONE;
1357 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1359 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
1360 dev->priv_flags |= IFF_NO_QUEUE;
1362 netif_keep_dst(dev);
1364 dev->needed_headroom = LL_MAX_HEADER + GTP_IPV4_MAXLEN;
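/* The default MTU above works out to 1500 - (20 + 8 + 20) = 1452 bytes:
 * ETH_DATA_LEN minus an outer IPv4 header, a UDP header and the largest
 * GTP header (the 20-byte GTPv0 one, per GTP_TH_MAXLEN). gtp_newlink()
 * lowers this further when the outer socket family is IPv6.
 */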
1368 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
1369 static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
1371 static void gtp_destructor(struct net_device *dev)
1373 struct gtp_dev *gtp = netdev_priv(dev);
1375 kfree(gtp->addr_hash);
1376 kfree(gtp->tid_hash);
1379 static int gtp_sock_udp_config(struct udp_port_cfg *udp_conf,
1380 const struct nlattr *nla, int family)
1382 udp_conf->family = family;
1384 switch (udp_conf->family) {
1386 udp_conf->local_ip.s_addr = nla_get_be32(nla);
1388 #if IS_ENABLED(CONFIG_IPV6)
1390 udp_conf->local_ip6 = nla_get_in6_addr(nla);
1400 static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp,
1401 const struct nlattr *nla, int family)
1403 struct udp_tunnel_sock_cfg tuncfg = {};
1404 struct udp_port_cfg udp_conf = {};
1405 struct net *net = gtp->net;
1406 struct socket *sock;
1410 err = gtp_sock_udp_config(&udp_conf, nla, family);
1412 return ERR_PTR(err);
1414 udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
1415 udp_conf.family = AF_INET;
1418 if (type == UDP_ENCAP_GTP0)
1419 udp_conf.local_udp_port = htons(GTP0_PORT);
1420 else if (type == UDP_ENCAP_GTP1U)
1421 udp_conf.local_udp_port = htons(GTP1U_PORT);
1423 return ERR_PTR(-EINVAL);
1425 err = udp_sock_create(net, &udp_conf, &sock);
1427 return ERR_PTR(err);
1429 tuncfg.sk_user_data = gtp;
1430 tuncfg.encap_type = type;
1431 tuncfg.encap_rcv = gtp_encap_recv;
1432 tuncfg.encap_destroy = NULL;
1434 setup_udp_tunnel_sock(net, sock, &tuncfg);
1439 static int gtp_create_sockets(struct gtp_dev *gtp, const struct nlattr *nla,
1445 sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp, nla, family);
1447 return PTR_ERR(sk0);
1449 sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp, nla, family);
1451 udp_tunnel_sock_release(sk0->sk_socket);
1452 return PTR_ERR(sk1u);
1455 gtp->sk_created = true;
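/* Two ways to wire up the UDP sockets: with IFLA_GTP_CREATE_SOCKETS the
 * kernel opens both sockets itself (GTPv0 on port 3386, GTPv1-U on 2152),
 * optionally bound to the address carried in IFLA_GTP_LOCAL or
 * IFLA_GTP_LOCAL6; otherwise userspace passes its own socket fds via
 * IFLA_GTP_FD0/IFLA_GTP_FD1 and gtp_encap_enable() attaches them. See
 * gtp_newlink() below.
 */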
1462 #define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header))
1463 #define GTP_IPV6_MAXLEN (sizeof(struct ipv6hdr) + GTP_TH_MAXLEN)
1465 static int gtp_newlink(struct net *src_net, struct net_device *dev,
1466 struct nlattr *tb[], struct nlattr *data[],
1467 struct netlink_ext_ack *extack)
1469 unsigned int role = GTP_ROLE_GGSN;
1470 struct gtp_dev *gtp;
1474 #if !IS_ENABLED(CONFIG_IPV6)
1475 if (data[IFLA_GTP_LOCAL6])
1476 return -EAFNOSUPPORT;
1479 gtp = netdev_priv(dev);
1481 if (!data[IFLA_GTP_PDP_HASHSIZE]) {
1484 hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
1489 if (data[IFLA_GTP_ROLE]) {
1490 role = nla_get_u32(data[IFLA_GTP_ROLE]);
1491 if (role > GTP_ROLE_SGSN)
1496 gtp->restart_count = nla_get_u8_default(data[IFLA_GTP_RESTART_COUNT],
1501 err = gtp_hashtable_new(gtp, hashsize);
1505 if (data[IFLA_GTP_CREATE_SOCKETS]) {
1506 if (data[IFLA_GTP_LOCAL6])
1507 err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL6], AF_INET6);
1509 err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL], AF_INET);
1511 err = gtp_encap_enable(gtp, data);
1517 if ((gtp->sk0 && gtp->sk0->sk_family == AF_INET6) ||
1518 (gtp->sk1u && gtp->sk1u->sk_family == AF_INET6)) {
1519 dev->mtu = ETH_DATA_LEN - GTP_IPV6_MAXLEN;
1520 dev->needed_headroom = LL_MAX_HEADER + GTP_IPV6_MAXLEN;
1523 err = register_netdevice(dev);
1525 netdev_dbg(dev, "failed to register new netdev %d\n", err);
1529 gn = net_generic(src_net, gtp_net_id);
1530 list_add(&gtp->list, &gn->gtp_dev_list);
1531 dev->priv_destructor = gtp_destructor;
1533 netdev_dbg(dev, "registered new GTP interface\n");
1538 gtp_encap_disable(gtp);
1540 kfree(gtp->addr_hash);
1541 kfree(gtp->tid_hash);
1545 static void gtp_dellink(struct net_device *dev, struct list_head *head)
1547 struct gtp_dev *gtp = netdev_priv(dev);
1548 struct hlist_node *next;
1549 struct pdp_ctx *pctx;
1552 for (i = 0; i < gtp->hash_size; i++)
1553 hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
1554 pdp_context_delete(pctx);
1556 list_del(&gtp->list);
1557 unregister_netdevice_queue(dev, head);
1560 static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
1561 [IFLA_GTP_FD0] = { .type = NLA_U32 },
1562 [IFLA_GTP_FD1] = { .type = NLA_U32 },
1563 [IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 },
1564 [IFLA_GTP_ROLE] = { .type = NLA_U32 },
1565 [IFLA_GTP_CREATE_SOCKETS] = { .type = NLA_U8 },
1566 [IFLA_GTP_RESTART_COUNT] = { .type = NLA_U8 },
1567 [IFLA_GTP_LOCAL] = { .type = NLA_U32 },
1568 [IFLA_GTP_LOCAL6] = { .len = sizeof(struct in6_addr) },
1571 static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
1572 struct netlink_ext_ack *extack)
1580 static size_t gtp_get_size(const struct net_device *dev)
1582 return nla_total_size(sizeof(__u32)) + /* IFLA_GTP_PDP_HASHSIZE */
1583 nla_total_size(sizeof(__u32)) + /* IFLA_GTP_ROLE */
1584 nla_total_size(sizeof(__u8)); /* IFLA_GTP_RESTART_COUNT */
1587 static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
1589 struct gtp_dev *gtp = netdev_priv(dev);
1591 if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
1592 goto nla_put_failure;
1593 if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role))
1594 goto nla_put_failure;
1595 if (nla_put_u8(skb, IFLA_GTP_RESTART_COUNT, gtp->restart_count))
1596 goto nla_put_failure;
1604 static struct rtnl_link_ops gtp_link_ops __read_mostly = {
1606 .maxtype = IFLA_GTP_MAX,
1607 .policy = gtp_policy,
1608 .priv_size = sizeof(struct gtp_dev),
1609 .setup = gtp_link_setup,
1610 .validate = gtp_validate,
1611 .newlink = gtp_newlink,
1612 .dellink = gtp_dellink,
1613 .get_size = gtp_get_size,
1614 .fill_info = gtp_fill_info,
1617 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
1621 gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
1622 GFP_KERNEL | __GFP_NOWARN);
1623 if (gtp->addr_hash == NULL)
1626 gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
1627 GFP_KERNEL | __GFP_NOWARN);
1628 if (gtp->tid_hash == NULL)
1631 gtp->hash_size = hsize;
1633 for (i = 0; i < hsize; i++) {
1634 INIT_HLIST_HEAD(&gtp->addr_hash[i]);
1635 INIT_HLIST_HEAD(&gtp->tid_hash[i]);
1639 kfree(gtp->addr_hash);
1643 static struct sock *gtp_encap_enable_socket(int fd, int type,
1644 struct gtp_dev *gtp)
1646 struct udp_tunnel_sock_cfg tuncfg = {NULL};
1647 struct socket *sock;
1651 pr_debug("enable gtp on %d, %d\n", fd, type);
1653 sock = sockfd_lookup(fd, &err);
1655 pr_debug("gtp socket fd=%d not found\n", fd);
1656 return ERR_PTR(err);
1660 if (sk->sk_protocol != IPPROTO_UDP ||
1661 sk->sk_type != SOCK_DGRAM ||
1662 (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
1663 pr_debug("socket fd=%d not UDP\n", fd);
1664 sk = ERR_PTR(-EINVAL);
1668 if (sk->sk_family == AF_INET6 &&
1670 sk = ERR_PTR(-EADDRNOTAVAIL);
1675 if (sk->sk_user_data) {
1676 sk = ERR_PTR(-EBUSY);
1682 tuncfg.sk_user_data = gtp;
1683 tuncfg.encap_type = type;
1684 tuncfg.encap_rcv = gtp_encap_recv;
1685 tuncfg.encap_destroy = gtp_encap_destroy;
1687 setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
1690 release_sock(sock->sk);
1696 static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
1698 struct sock *sk1u = NULL;
1699 struct sock *sk0 = NULL;
1701 if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
1704 if (data[IFLA_GTP_FD0]) {
1705 int fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
1708 sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
1710 return PTR_ERR(sk0);
1714 if (data[IFLA_GTP_FD1]) {
1715 int fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
1718 sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
1720 gtp_encap_disable_sock(sk0);
1721 return PTR_ERR(sk1u);
1730 sk0->sk_family != sk1u->sk_family) {
1731 gtp_encap_disable_sock(sk0);
1732 gtp_encap_disable_sock(sk1u);
1739 static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
1741 struct gtp_dev *gtp = NULL;
1742 struct net_device *dev;
1745 /* Examine the link attributes and figure out which network namespace
1746 * we are talking about.
1748 if (nla[GTPA_NET_NS_FD])
1749 net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD]));
1751 net = get_net(src_net);
1756 /* Check if there's an existing gtpX device to configure */
1757 dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
1758 if (dev && dev->netdev_ops == &gtp_netdev_ops)
1759 gtp = netdev_priv(dev);
1765 static void gtp_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
1767 pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
1769 switch (pctx->gtp_version) {
1771 /* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
1772 * label needs to be the same for uplink and downlink packets,
1773 * so let's annotate this.
1775 pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
1776 pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
1779 pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
1780 pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
1787 static void ip_pdp_peer_fill(struct pdp_ctx *pctx, struct genl_info *info)
1789 if (info->attrs[GTPA_PEER_ADDRESS]) {
1790 pctx->peer.addr.s_addr =
1791 nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
1792 } else if (info->attrs[GTPA_PEER_ADDR6]) {
1793 pctx->peer.addr6 = nla_get_in6_addr(info->attrs[GTPA_PEER_ADDR6]);
1797 static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
1799 ip_pdp_peer_fill(pctx, info);
1800 pctx->ms.addr.s_addr =
1801 nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
1802 gtp_pdp_fill(pctx, info);
1805 static bool ipv6_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
1807 ip_pdp_peer_fill(pctx, info);
1808 pctx->ms.addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]);
1809 if (pctx->ms.addr6.s6_addr32[2] ||
1810 pctx->ms.addr6.s6_addr32[3])
1813 gtp_pdp_fill(pctx, info);
1818 static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
1819 struct genl_info *info)
1821 struct pdp_ctx *pctx, *pctx_tid = NULL;
1822 struct net_device *dev = gtp->dev;
1823 u32 hash_ms, hash_tid = 0;
1824 struct in6_addr ms_addr6;
1825 unsigned int version;
1830 version = nla_get_u32(info->attrs[GTPA_VERSION]);
1832 family = nla_get_u8_default(info->attrs[GTPA_FAMILY], AF_INET);
1834 #if !IS_ENABLED(CONFIG_IPV6)
1835 if (family == AF_INET6)
1836 return ERR_PTR(-EAFNOSUPPORT);
1838 if (!info->attrs[GTPA_PEER_ADDRESS] &&
1839 !info->attrs[GTPA_PEER_ADDR6])
1840 return ERR_PTR(-EINVAL);
1842 if ((info->attrs[GTPA_PEER_ADDRESS] &&
1843 sk->sk_family == AF_INET6) ||
1844 (info->attrs[GTPA_PEER_ADDR6] &&
1845 sk->sk_family == AF_INET))
1846 return ERR_PTR(-EAFNOSUPPORT);
1850 if (!info->attrs[GTPA_MS_ADDRESS] ||
1851 info->attrs[GTPA_MS_ADDR6])
1852 return ERR_PTR(-EINVAL);
1854 ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
1855 hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
1856 pctx = ipv4_pdp_find(gtp, ms_addr);
1859 if (!info->attrs[GTPA_MS_ADDR6] ||
1860 info->attrs[GTPA_MS_ADDRESS])
1861 return ERR_PTR(-EINVAL);
1863 ms_addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]);
1864 hash_ms = ipv6_hashfn(&ms_addr6) % gtp->hash_size;
1865 pctx = ipv6_pdp_find(gtp, &ms_addr6);
1868 return ERR_PTR(-EAFNOSUPPORT);
1872 if (version == GTP_V0)
1873 pctx_tid = gtp0_pdp_find(gtp,
1874 nla_get_u64(info->attrs[GTPA_TID]),
1876 else if (version == GTP_V1)
1877 pctx_tid = gtp1_pdp_find(gtp,
1878 nla_get_u32(info->attrs[GTPA_I_TEI]),
1884 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
1885 return ERR_PTR(-EEXIST);
1886 if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
1887 return ERR_PTR(-EOPNOTSUPP);
1889 if (pctx && pctx_tid)
1890 return ERR_PTR(-EEXIST);
1896 ipv4_pdp_fill(pctx, info);
1899 if (!ipv6_pdp_fill(pctx, info))
1900 return ERR_PTR(-EADDRNOTAVAIL);
1904 if (pctx->gtp_version == GTP_V0)
1905 netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
1906 pctx->u.v0.tid, pctx);
1907 else if (pctx->gtp_version == GTP_V1)
1908 netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
1909 pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
1915 pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
1917 return ERR_PTR(-ENOMEM);
1921 pctx->dev = gtp->dev;
1926 if (!info->attrs[GTPA_MS_ADDRESS]) {
1929 return ERR_PTR(-EINVAL);
1932 ipv4_pdp_fill(pctx, info);
1935 if (!info->attrs[GTPA_MS_ADDR6]) {
1938 return ERR_PTR(-EINVAL);
1941 if (!ipv6_pdp_fill(pctx, info)) {
1944 return ERR_PTR(-EADDRNOTAVAIL);
1948 atomic_set(&pctx->tx_seq, 0);
1950 switch (pctx->gtp_version) {
1952 /* TS 09.60: "The flow label identifies unambiguously a GTP
1953 * flow." We use the tid for this instead; I cannot find a
1954 * situation in which this doesn't unambiguously identify the
1957 hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
1960 hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
1964 hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
1965 hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);
1967 switch (pctx->gtp_version) {
1969 netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
1970 pctx->u.v0.tid, &pctx->peer.addr,
1971 &pctx->ms.addr, pctx);
1974 netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
1975 pctx->u.v1.i_tei, pctx->u.v1.o_tei,
1976 &pctx->peer.addr, &pctx->ms.addr, pctx);
1983 static void pdp_context_free(struct rcu_head *head)
1985 struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);
1991 static void pdp_context_delete(struct pdp_ctx *pctx)
1993 hlist_del_rcu(&pctx->hlist_tid);
1994 hlist_del_rcu(&pctx->hlist_addr);
1995 call_rcu(&pctx->rcu_head, pdp_context_free);
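/* Deletion follows the usual RCU pattern: the context is unhooked from both
 * hash chains first, and the actual release happens in pdp_context_free()
 * only after a grace period, so lookups running under rcu_read_lock() in
 * the receive and transmit paths never see freed memory.
 */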
1998 static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation);
2000 static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
2002 unsigned int version;
2003 struct pdp_ctx *pctx;
2004 struct gtp_dev *gtp;
2008 if (!info->attrs[GTPA_VERSION] ||
2009 !info->attrs[GTPA_LINK])
2012 version = nla_get_u32(info->attrs[GTPA_VERSION]);
2016 if (!info->attrs[GTPA_TID] ||
2017 !info->attrs[GTPA_FLOW])
2021 if (!info->attrs[GTPA_I_TEI] ||
2022 !info->attrs[GTPA_O_TEI])
2032 gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
2038 if (version == GTP_V0)
2040 else if (version == GTP_V1)
2050 pctx = gtp_pdp_add(gtp, sk, info);
2052 err = PTR_ERR(pctx);
2054 gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL);
2063 static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
2064 struct nlattr *nla[])
2066 struct gtp_dev *gtp;
2069 family = nla_get_u8_default(nla[GTPA_FAMILY], AF_INET);
2071 gtp = gtp_find_dev(net, nla);
2073 return ERR_PTR(-ENODEV);
2075 if (nla[GTPA_MS_ADDRESS]) {
2076 __be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);
2078 if (family != AF_INET)
2079 return ERR_PTR(-EINVAL);
2081 return ipv4_pdp_find(gtp, ip);
2082 } else if (nla[GTPA_MS_ADDR6]) {
2083 struct in6_addr addr = nla_get_in6_addr(nla[GTPA_MS_ADDR6]);
2085 if (family != AF_INET6)
2086 return ERR_PTR(-EINVAL);
2088 if (addr.s6_addr32[2] ||
2090 return ERR_PTR(-EADDRNOTAVAIL);
2092 return ipv6_pdp_find(gtp, &addr);
2093 } else if (nla[GTPA_VERSION]) {
2094 u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);
2096 if (gtp_version == GTP_V0 && nla[GTPA_TID]) {
2097 return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]),
2099 } else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI]) {
2100 return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]),
2105 return ERR_PTR(-EINVAL);
2108 static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[])
2110 struct pdp_ctx *pctx;
2113 pctx = gtp_find_pdp_by_link(net, nla);
2115 pctx = ERR_PTR(-EINVAL);
2118 pctx = ERR_PTR(-ENOENT);
2123 static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
2125 struct pdp_ctx *pctx;
2128 if (!info->attrs[GTPA_VERSION])
2133 pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
2135 err = PTR_ERR(pctx);
2139 if (pctx->gtp_version == GTP_V0)
2140 netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
2141 pctx->u.v0.tid, pctx);
2142 else if (pctx->gtp_version == GTP_V1)
2143 netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
2144 pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
2146 gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC);
2147 pdp_context_delete(pctx);
2154 static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
2155 int flags, u32 type, struct pdp_ctx *pctx)
2159 genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
2164 if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
2165 nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
2166 nla_put_u8(skb, GTPA_FAMILY, pctx->af))
2167 goto nla_put_failure;
2171 if (nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms.addr.s_addr))
2172 goto nla_put_failure;
2175 if (nla_put_in6_addr(skb, GTPA_MS_ADDR6, &pctx->ms.addr6))
2176 goto nla_put_failure;
2180 switch (pctx->sk->sk_family) {
2182 if (nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer.addr.s_addr))
2183 goto nla_put_failure;
2186 if (nla_put_in6_addr(skb, GTPA_PEER_ADDR6, &pctx->peer.addr6))
2187 goto nla_put_failure;
2191 switch (pctx->gtp_version) {
2193 if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
2194 nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
2195 goto nla_put_failure;
2198 if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
2199 nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
2200 goto nla_put_failure;
2203 genlmsg_end(skb, genlh);
2208 genlmsg_cancel(skb, genlh);
2212 static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation)
2214 struct sk_buff *msg;
2217 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation);
2221 ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx);
2227 ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg,
2228 0, GTP_GENL_MCGRP, GFP_ATOMIC);
2232 static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
2234 struct pdp_ctx *pctx = NULL;
2235 struct sk_buff *skb2;
2238 if (!info->attrs[GTPA_VERSION])
2243 pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
2245 err = PTR_ERR(pctx);
2249 skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
2255 err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
2256 0, info->nlhdr->nlmsg_type, pctx);
2258 goto err_unlock_free;
2261 return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);
2270 static int gtp_genl_dump_pdp(struct sk_buff *skb,
2271 struct netlink_callback *cb)
2273 struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
2274 int i, j, bucket = cb->args[0], skip = cb->args[1];
2275 struct net *net = sock_net(skb->sk);
2276 struct net_device *dev;
2277 struct pdp_ctx *pctx;
2283 for_each_netdev_rcu(net, dev) {
2284 if (dev->rtnl_link_ops != &gtp_link_ops)
2287 gtp = netdev_priv(dev);
2289 if (last_gtp && last_gtp != gtp)
2294 for (i = bucket; i < gtp->hash_size; i++) {
2296 hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
2299 gtp_genl_fill_info(skb,
2300 NETLINK_CB(cb->skb).portid,
2303 cb->nlh->nlmsg_type, pctx)) {
2306 cb->args[2] = (unsigned long)gtp;
2321 static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
2323 struct sk_buff *skb_to_send;
2324 __be32 src_ip, dst_ip;
2325 unsigned int version;
2326 struct gtp_dev *gtp;
2333 if (!info->attrs[GTPA_VERSION] ||
2334 !info->attrs[GTPA_LINK] ||
2335 !info->attrs[GTPA_PEER_ADDRESS] ||
2336 !info->attrs[GTPA_MS_ADDRESS])
2339 version = nla_get_u32(info->attrs[GTPA_VERSION]);
2340 dst_ip = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
2341 src_ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
2343 gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
2347 if (!gtp->sk_created)
2349 if (!(gtp->dev->flags & IFF_UP))
2352 if (version == GTP_V0) {
2353 struct gtp0_header *gtp0_h;
2355 len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp0_header) +
2356 sizeof(struct iphdr) + sizeof(struct udphdr);
2358 skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
2363 port = htons(GTP0_PORT);
2365 gtp0_h = skb_push(skb_to_send, sizeof(struct gtp0_header));
2366 memset(gtp0_h, 0, sizeof(struct gtp0_header));
2367 gtp0_build_echo_msg(gtp0_h, GTP_ECHO_REQ);
2368 } else if (version == GTP_V1) {
2369 struct gtp1_header_long *gtp1u_h;
2371 len = LL_RESERVED_SPACE(gtp->dev) +
2372 sizeof(struct gtp1_header_long) +
2373 sizeof(struct iphdr) + sizeof(struct udphdr);
2375 skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
2380 port = htons(GTP1U_PORT);
2382 gtp1u_h = skb_push(skb_to_send,
2383 sizeof(struct gtp1_header_long));
2384 memset(gtp1u_h, 0, sizeof(struct gtp1_header_long));
2385 gtp1u_build_echo_msg(gtp1u_h, GTP_ECHO_REQ);
2390 rt = ip4_route_output_gtp(&fl4, sk, dst_ip, src_ip);
2392 netdev_dbg(gtp->dev, "no route for echo request to %pI4\n",
2394 kfree_skb(skb_to_send);
2398 udp_tunnel_xmit_skb(rt, sk, skb_to_send,
2399 fl4.saddr, fl4.daddr,
2401 ip4_dst_hoplimit(&rt->dst),
2404 !net_eq(sock_net(sk),
2410 static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
2411 [GTPA_LINK] = { .type = NLA_U32, },
2412 [GTPA_VERSION] = { .type = NLA_U32, },
2413 [GTPA_TID] = { .type = NLA_U64, },
2414 [GTPA_PEER_ADDRESS] = { .type = NLA_U32, },
2415 [GTPA_MS_ADDRESS] = { .type = NLA_U32, },
2416 [GTPA_FLOW] = { .type = NLA_U16, },
2417 [GTPA_NET_NS_FD] = { .type = NLA_U32, },
2418 [GTPA_I_TEI] = { .type = NLA_U32, },
2419 [GTPA_O_TEI] = { .type = NLA_U32, },
2420 [GTPA_PEER_ADDR6] = { .len = sizeof(struct in6_addr), },
2421 [GTPA_MS_ADDR6] = { .len = sizeof(struct in6_addr), },
2422 [GTPA_FAMILY] = { .type = NLA_U8, },
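/* A sketch of the attributes a GTP_CMD_NEWPDP request is expected to carry
 * for a GTPv1-U context (see gtp_genl_new_pdp() above for the checks):
 * GTPA_VERSION = GTP_V1, GTPA_LINK = <gtp device ifindex>, GTPA_I_TEI and
 * GTPA_O_TEI, plus GTPA_MS_ADDRESS and GTPA_PEER_ADDRESS (or their _ADDR6
 * counterparts together with GTPA_FAMILY = AF_INET6). GTPv0 contexts take
 * GTPA_TID and GTPA_FLOW instead of the TEI pair.
 */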
2425 static const struct genl_small_ops gtp_genl_ops[] = {
2427 .cmd = GTP_CMD_NEWPDP,
2428 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2429 .doit = gtp_genl_new_pdp,
2430 .flags = GENL_ADMIN_PERM,
2433 .cmd = GTP_CMD_DELPDP,
2434 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2435 .doit = gtp_genl_del_pdp,
2436 .flags = GENL_ADMIN_PERM,
2439 .cmd = GTP_CMD_GETPDP,
2440 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2441 .doit = gtp_genl_get_pdp,
2442 .dumpit = gtp_genl_dump_pdp,
2443 .flags = GENL_ADMIN_PERM,
2446 .cmd = GTP_CMD_ECHOREQ,
2447 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2448 .doit = gtp_genl_send_echo_req,
2449 .flags = GENL_ADMIN_PERM,
2453 static struct genl_family gtp_genl_family __ro_after_init = {
2457 .maxattr = GTPA_MAX,
2458 .policy = gtp_genl_policy,
2460 .module = THIS_MODULE,
2461 .small_ops = gtp_genl_ops,
2462 .n_small_ops = ARRAY_SIZE(gtp_genl_ops),
2463 .resv_start_op = GTP_CMD_ECHOREQ + 1,
2464 .mcgrps = gtp_genl_mcgrps,
2465 .n_mcgrps = ARRAY_SIZE(gtp_genl_mcgrps),
2468 static int __net_init gtp_net_init(struct net *net)
2470 struct gtp_net *gn = net_generic(net, gtp_net_id);
2472 INIT_LIST_HEAD(&gn->gtp_dev_list);
2476 static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
2477 struct list_head *dev_to_kill)
2481 list_for_each_entry(net, net_list, exit_list) {
2482 struct gtp_net *gn = net_generic(net, gtp_net_id);
2483 struct gtp_dev *gtp, *gtp_next;
2484 struct net_device *dev;
2486 for_each_netdev(net, dev)
2487 if (dev->rtnl_link_ops == &gtp_link_ops)
2488 gtp_dellink(dev, dev_to_kill);
2490 list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
2491 gtp_dellink(gtp->dev, dev_to_kill);
2495 static struct pernet_operations gtp_net_ops = {
2496 .init = gtp_net_init,
2497 .exit_batch_rtnl = gtp_net_exit_batch_rtnl,
2499 .size = sizeof(struct gtp_net),
2502 static int __init gtp_init(void)
2506 get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
2508 err = register_pernet_subsys(&gtp_net_ops);
2512 err = rtnl_link_register(&gtp_link_ops);
2514 goto unreg_pernet_subsys;
2516 err = genl_register_family(&gtp_genl_family);
2518 goto unreg_rtnl_link;
2520 pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
2521 sizeof(struct pdp_ctx));
2525 rtnl_link_unregister(&gtp_link_ops);
2526 unreg_pernet_subsys:
2527 unregister_pernet_subsys(&gtp_net_ops);
2529 pr_err("error loading GTP module\n");
2532 late_initcall(gtp_init);
2534 static void __exit gtp_fini(void)
2536 genl_unregister_family(&gtp_genl_family);
2537 rtnl_link_unregister(&gtp_link_ops);
2538 unregister_pernet_subsys(&gtp_net_ops);
2540 pr_info("GTP module unloaded\n");
2542 module_exit(gtp_fini);
2544 MODULE_LICENSE("GPL");
2546 MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
2547 MODULE_ALIAS_RTNL_LINK("gtp");
2548 MODULE_ALIAS_GENL_FAMILY("gtp");