1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* GTP according to GSM TS 09.60 / 3GPP TS 29.060
4 * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/module.h>
15 #include <linux/skbuff.h>
16 #include <linux/udp.h>
17 #include <linux/rculist.h>
18 #include <linux/jhash.h>
19 #include <linux/if_tunnel.h>
20 #include <linux/net.h>
21 #include <linux/file.h>
22 #include <linux/gtp.h>
24 #include <net/net_namespace.h>
25 #include <net/protocol.h>
29 #include <net/udp_tunnel.h>
32 #include <net/genetlink.h>
33 #include <net/netns/generic.h>
36 /* An active session for the subscriber. */
38 struct hlist_node hlist_tid;
39 struct hlist_node hlist_addr;
56 struct in6_addr addr6;
60 struct in6_addr addr6;
64 struct net_device *dev;
67 struct rcu_head rcu_head;
70 /* One instance of the GTP device. */
72 struct list_head list;
78 struct net_device *dev;
82 unsigned int hash_size;
83 struct hlist_head *tid_hash;
84 struct hlist_head *addr_hash;
101 static unsigned int gtp_net_id __read_mostly;
104 struct list_head gtp_dev_list;
107 static u32 gtp_h_initval;
109 static struct genl_family gtp_genl_family;
111 enum gtp_multicast_groups {
115 static const struct genl_multicast_group gtp_genl_mcgrps[] = {
116 [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
119 static void pdp_context_delete(struct pdp_ctx *pctx);
121 static inline u32 gtp0_hashfn(u64 tid)
123 u32 *tid32 = (u32 *) &tid;
124 return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
127 static inline u32 gtp1u_hashfn(u32 tid)
129 return jhash_1word(tid, gtp_h_initval);
132 static inline u32 ipv4_hashfn(__be32 ip)
134 return jhash_1word((__force u32)ip, gtp_h_initval);
137 static u32 ipv6_hashfn(const struct in6_addr *ip6)
139 return jhash_2words((__force u32)ip6->s6_addr32[0],
140 (__force u32)ip6->s6_addr32[1], gtp_h_initval);
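/* Two RCU hash tables index the PDP contexts: tid_hash is keyed by the
 * GTPv0 TID or GTPv1 TEI (used on decapsulation), addr_hash is keyed by
 * the mobile subscriber address (used on transmission).  A bucket is
 * simply hash % gtp->hash_size, so with the default hash size (1024
 * unless IFLA_GTP_PDP_HASHSIZE says otherwise) a 64bit TID hashed with
 * gtp0_hashfn() lands in one of 1024 chains.
 */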
143 /* Resolve a PDP context structure based on the 64bit TID. */
144 static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid, u16 family)
146 struct hlist_head *head;
149 head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];
151 hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
152 if (pdp->af == family &&
153 pdp->gtp_version == GTP_V0 &&
154 pdp->u.v0.tid == tid)
160 /* Resolve a PDP context structure based on the 32bit TEI. */
161 static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid, u16 family)
163 struct hlist_head *head;
166 head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];
168 hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
169 if (pdp->af == family &&
170 pdp->gtp_version == GTP_V1 &&
171 pdp->u.v1.i_tei == tid)
177 /* Resolve a PDP context based on IPv4 address of MS. */
178 static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
180 struct hlist_head *head;
183 head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];
185 hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
186 if (pdp->af == AF_INET &&
187 pdp->ms.addr.s_addr == ms_addr)
194 /* 3GPP TS 29.060: PDN Connection: the association between a MS represented by
195 * [...] one IPv6 *prefix* and a PDN represented by an APN.
197 * Then, 3GPP TS 29.061, Section 11.2.1.3 says: The size of the prefix shall be
198 * according to the maximum prefix length for a global IPv6 address as
199 * specified in the IPv6 Addressing Architecture, see RFC 4291.
201 * Finally, RFC 4291 section 2.5.4 states: All Global Unicast addresses other
202 * than those that start with binary 000 have a 64-bit interface ID field
203 * (i.e., n + m = 64).
205 static bool ipv6_pdp_addr_equal(const struct in6_addr *a,
206 const struct in6_addr *b)
208 return a->s6_addr32[0] == b->s6_addr32[0] &&
209 a->s6_addr32[1] == b->s6_addr32[1];
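/* Example: 2001:db8:1:1::1 and 2001:db8:1:1::2 compare as equal here,
 * since only the upper 64 bits (the prefix) are significant for a PDN
 * connection; the interface identifier is ignored.
 */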
212 static struct pdp_ctx *ipv6_pdp_find(struct gtp_dev *gtp,
213 const struct in6_addr *ms_addr)
215 struct hlist_head *head;
218 head = &gtp->addr_hash[ipv6_hashfn(ms_addr) % gtp->hash_size];
220 hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
221 if (pdp->af == AF_INET6 &&
222 ipv6_pdp_addr_equal(&pdp->ms.addr6, ms_addr))
229 static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
230 unsigned int hdrlen, unsigned int role)
234 if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
237 iph = (struct iphdr *)(skb->data + hdrlen);
239 if (role == GTP_ROLE_SGSN)
240 return iph->daddr == pctx->ms.addr.s_addr;
242 return iph->saddr == pctx->ms.addr.s_addr;
245 static bool gtp_check_ms_ipv6(struct sk_buff *skb, struct pdp_ctx *pctx,
246 unsigned int hdrlen, unsigned int role)
248 struct ipv6hdr *ip6h;
251 if (!pskb_may_pull(skb, hdrlen + sizeof(struct ipv6hdr)))
254 ip6h = (struct ipv6hdr *)(skb->data + hdrlen);
256 if ((ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL) ||
257 (ipv6_addr_type(&ip6h->daddr) & IPV6_ADDR_LINKLOCAL))
260 if (role == GTP_ROLE_SGSN) {
261 ret = ipv6_pdp_addr_equal(&ip6h->daddr, &pctx->ms.addr6);
263 ret = ipv6_pdp_addr_equal(&ip6h->saddr, &pctx->ms.addr6);
269 /* Check if the inner IP address in this packet is assigned to any
270 * existing mobile subscriber.
272 static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
273 unsigned int hdrlen, unsigned int role,
276 switch (inner_proto) {
278 return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
280 return gtp_check_ms_ipv6(skb, pctx, hdrlen, role);
285 static int gtp_inner_proto(struct sk_buff *skb, unsigned int hdrlen,
288 __u8 *ip_version, _ip_version;
290 ip_version = skb_header_pointer(skb, hdrlen, sizeof(*ip_version),
295 switch (*ip_version & 0xf0) {
297 *inner_proto = ETH_P_IP;
300 *inner_proto = ETH_P_IPV6;
309 static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
310 unsigned int hdrlen, unsigned int role, __u16 inner_proto)
312 if (!gtp_check_ms(skb, pctx, hdrlen, role, inner_proto)) {
313 netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
317 /* Get rid of the GTP + UDP headers. */
318 if (iptunnel_pull_header(skb, hdrlen, htons(inner_proto),
319 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) {
320 pctx->dev->stats.rx_length_errors++;
324 netdev_dbg(pctx->dev, "forwarding packet from GGSN to uplink\n");
326 /* Now that the UDP and the GTP header have been removed, set up the
327 * new network header. This is required by the upper layer to
328 * calculate the transport header.
330 skb_reset_network_header(skb);
331 skb_reset_mac_header(skb);
333 skb->dev = pctx->dev;
335 dev_sw_netstats_rx_add(pctx->dev, skb->len);
341 pctx->dev->stats.rx_dropped++;
345 static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
346 const struct sock *sk,
347 __be32 daddr, __be32 saddr)
349 memset(fl4, 0, sizeof(*fl4));
350 fl4->flowi4_oif = sk->sk_bound_dev_if;
353 fl4->flowi4_tos = ip_sock_rt_tos(sk);
354 fl4->flowi4_scope = ip_sock_rt_scope(sk);
355 fl4->flowi4_proto = sk->sk_protocol;
357 return ip_route_output_key(sock_net(sk), fl4);
360 static struct rt6_info *ip6_route_output_gtp(struct net *net,
362 const struct sock *sk,
363 const struct in6_addr *daddr,
364 struct in6_addr *saddr)
366 struct dst_entry *dst;
368 memset(fl6, 0, sizeof(*fl6));
369 fl6->flowi6_oif = sk->sk_bound_dev_if;
372 fl6->flowi6_proto = sk->sk_protocol;
374 dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, fl6, NULL);
376 return ERR_PTR(-ENETUNREACH);
378 return (struct rt6_info *)dst;
382 * In all Path Management messages:
383 * - TID: is not used and shall be set to 0.
384 * - Flow Label is not used and shall be set to 0
385 * In signalling messages:
386 * - number: this field is not yet used in signalling messages.
387 * It shall be set to 255 by the sender and shall be ignored
389 * Returns true if the echo req was correct, false otherwise.
391 static bool gtp0_validate_echo_hdr(struct gtp0_header *gtp0)
393 return !(gtp0->tid || (gtp0->flags ^ 0x1e) ||
394 gtp0->number != 0xff || gtp0->flow);
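/* 0x1e is the fixed flag byte for GTPv0 path management messages:
 * version 0 in the top three bits, PT=1 (GTP rather than GTP'), the
 * three spare bits set to '111' and SNN=0, per GSM TS 09.60.
 */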
397 /* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
398 static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type)
400 int len_pkt, len_hdr;
402 hdr->flags = 0x1e; /* v0, GTP-non-prime. */
403 hdr->type = msg_type;
404 /* GSM TS 09.60. 7.3 In all Path Management messages, Flow Label and TID
405 * are not used and shall be set to 0.
410 hdr->spare[0] = 0xff;
411 hdr->spare[1] = 0xff;
412 hdr->spare[2] = 0xff;
414 len_pkt = sizeof(struct gtp0_packet);
415 len_hdr = sizeof(struct gtp0_header);
417 if (msg_type == GTP_ECHO_RSP)
418 hdr->length = htons(len_pkt - len_hdr);
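/* struct gtp0_header is the fixed 20 byte GTPv0 header (flags, type,
 * length, seq, flow, number, three spare octets and the 64bit TID).
 * An echo response (struct gtp0_packet) is that header followed by a
 * single Recovery information element, so the length field above covers
 * only the 2 byte IE that follows the header.
 */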
423 static int gtp0_send_echo_resp_ip(struct gtp_dev *gtp, struct sk_buff *skb)
425 struct iphdr *iph = ip_hdr(skb);
429 /* find route to the sender,
430 * src address becomes dst address and vice versa.
432 rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr);
434 netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
439 udp_tunnel_xmit_skb(rt, gtp->sk0, skb,
440 fl4.saddr, fl4.daddr,
442 ip4_dst_hoplimit(&rt->dst),
444 htons(GTP0_PORT), htons(GTP0_PORT),
445 !net_eq(sock_net(gtp->sk1u),
452 static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
454 struct gtp0_packet *gtp_pkt;
455 struct gtp0_header *gtp0;
458 gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
460 if (!gtp0_validate_echo_hdr(gtp0))
465 /* pull GTP and UDP headers */
466 skb_pull_data(skb, sizeof(struct gtp0_header) + sizeof(struct udphdr));
468 gtp_pkt = skb_push(skb, sizeof(struct gtp0_packet));
469 memset(gtp_pkt, 0, sizeof(struct gtp0_packet));
471 gtp0_build_echo_msg(&gtp_pkt->gtp0_h, GTP_ECHO_RSP);
473 /* GSM TS 09.60. 7.3 The Sequence Number in a signalling response
474 * message shall be copied from the signalling request message
475 * that the GSN is replying to.
477 gtp_pkt->gtp0_h.seq = seq;
479 gtp_pkt->ie.tag = GTPIE_RECOVERY;
480 gtp_pkt->ie.val = gtp->restart_count;
482 switch (gtp->sk0->sk_family) {
484 if (gtp0_send_echo_resp_ip(gtp, skb) < 0)
494 static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
495 int flags, u32 type, struct echo_info echo)
499 genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
504 if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) ||
505 nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer.addr.s_addr) ||
506 nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms.addr.s_addr))
509 genlmsg_end(skb, genlh);
513 genlmsg_cancel(skb, genlh);
517 static void gtp0_handle_echo_resp_ip(struct sk_buff *skb, struct echo_info *echo)
519 struct iphdr *iph = ip_hdr(skb);
521 echo->ms.addr.s_addr = iph->daddr;
522 echo->peer.addr.s_addr = iph->saddr;
523 echo->gtp_version = GTP_V0;
526 static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
528 struct gtp0_header *gtp0;
529 struct echo_info echo;
533 gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
535 if (!gtp0_validate_echo_hdr(gtp0))
538 switch (gtp->sk0->sk_family) {
540 gtp0_handle_echo_resp_ip(skb, &echo);
546 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
550 ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
556 return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
557 msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
560 static int gtp_proto_to_family(__u16 proto)
575 /* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
576 static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
578 unsigned int hdrlen = sizeof(struct udphdr) +
579 sizeof(struct gtp0_header);
580 struct gtp0_header *gtp0;
581 struct pdp_ctx *pctx;
584 if (!pskb_may_pull(skb, hdrlen))
587 gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
589 if ((gtp0->flags >> 5) != GTP_V0)
592 /* If the sockets were created in kernel, it means that
593 * there is no daemon running in userspace which would
594 * handle echo requests.
596 if (gtp0->type == GTP_ECHO_REQ && gtp->sk_created)
597 return gtp0_send_echo_resp(gtp, skb);
599 if (gtp0->type == GTP_ECHO_RSP && gtp->sk_created)
600 return gtp0_handle_echo_resp(gtp, skb);
602 if (gtp0->type != GTP_TPDU)
605 if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) {
606 netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n");
610 pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid),
611 gtp_proto_to_family(inner_proto));
613 netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
617 return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto);
620 /* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
621 static void gtp1u_build_echo_msg(struct gtp1_header_long *hdr, __u8 msg_type)
623 int len_pkt, len_hdr;
625 /* S flag must be set to 1 */
626 hdr->flags = 0x32; /* v1, GTP-non-prime. */
627 hdr->type = msg_type;
628 /* 3GPP TS 29.281 5.1 - TEID has to be set to 0 */
631 /* seq, npdu and next should be counted into the length of the GTP packet,
632 * that's why the size of gtp1_header should be subtracted,
633 * not the size of gtp1_header_long.
636 len_hdr = sizeof(struct gtp1_header);
638 if (msg_type == GTP_ECHO_RSP) {
639 len_pkt = sizeof(struct gtp1u_packet);
640 hdr->length = htons(len_pkt - len_hdr);
642 /* GTP_ECHO_REQ does not carry a GTP Information Element,
643 * which is why gtp1_header_long is used here.
645 len_pkt = sizeof(struct gtp1_header_long);
646 hdr->length = htons(len_pkt - len_hdr);
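/* Worked example: a GTPv1-U echo response is the 8 byte mandatory
 * header, 4 optional octets (seq, npdu, next extension type, present
 * because the S flag is set) and a 2 byte Recovery IE.  The length
 * field counts everything after the first 8 octets, hence 4 + 2 = 6.
 */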
650 static int gtp1u_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
652 struct gtp1_header_long *gtp1u;
653 struct gtp1u_packet *gtp_pkt;
658 gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));
660 /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
661 * Error Indication and Supported Extension Headers Notification
662 * messages, the S flag shall be set to 1 and TEID shall be set to 0.
664 if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
667 /* pull GTP and UDP headers */
669 sizeof(struct gtp1_header_long) + sizeof(struct udphdr));
671 gtp_pkt = skb_push(skb, sizeof(struct gtp1u_packet));
672 memset(gtp_pkt, 0, sizeof(struct gtp1u_packet));
674 gtp1u_build_echo_msg(&gtp_pkt->gtp1u_h, GTP_ECHO_RSP);
676 /* 3GPP TS 29.281 7.7.2 - The Restart Counter value in the
677 * Recovery information element shall not be used, i.e. it shall
678 * be set to zero by the sender and shall be ignored by the receiver.
679 * The Recovery information element is mandatory due to backwards
680 * compatibility reasons.
682 gtp_pkt->ie.tag = GTPIE_RECOVERY;
687 /* find route to the sender,
688 * src address becomes dst address and vice versa.
690 rt = ip4_route_output_gtp(&fl4, gtp->sk1u, iph->saddr, iph->daddr);
692 netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
697 udp_tunnel_xmit_skb(rt, gtp->sk1u, skb,
698 fl4.saddr, fl4.daddr,
700 ip4_dst_hoplimit(&rt->dst),
702 htons(GTP1U_PORT), htons(GTP1U_PORT),
703 !net_eq(sock_net(gtp->sk1u),
709 static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
711 struct gtp1_header_long *gtp1u;
712 struct echo_info echo;
717 gtp1u = (struct gtp1_header_long *)(skb->data + sizeof(struct udphdr));
719 /* 3GPP TS 29.281 5.1 - For the Echo Request, Echo Response,
720 * Error Indication and Supported Extension Headers Notification
721 * messages, the S flag shall be set to 1 and TEID shall be set to 0.
723 if (!(gtp1u->flags & GTP1_F_SEQ) || gtp1u->tid)
727 echo.ms.addr.s_addr = iph->daddr;
728 echo.peer.addr.s_addr = iph->saddr;
729 echo.gtp_version = GTP_V1;
731 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
735 ret = gtp_genl_fill_echo(msg, 0, 0, 0, GTP_CMD_ECHOREQ, echo);
741 return genlmsg_multicast_netns(&gtp_genl_family, dev_net(gtp->dev),
742 msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
745 static int gtp_parse_exthdrs(struct sk_buff *skb, unsigned int *hdrlen)
747 struct gtp_ext_hdr *gtp_exthdr, _gtp_exthdr;
748 unsigned int offset = *hdrlen;
749 __u8 *next_type, _next_type;
751 /* From 29.060: "The Extension Header Length field specifies the length
752 * of the particular Extension header in 4 octets units."
754 * This length field includes length field size itself (1 byte),
755 * payload (variable length) and next type (1 byte). The extension
756 * header is aligned to 4 bytes.
760 gtp_exthdr = skb_header_pointer(skb, offset, sizeof(*gtp_exthdr),
762 if (!gtp_exthdr || !gtp_exthdr->len)
765 offset += gtp_exthdr->len * 4;
767 /* From 29.060: "If no such Header follows, then the value of
768 * the Next Extension Header Type shall be 0."
770 next_type = skb_header_pointer(skb, offset - 1,
771 sizeof(_next_type), &_next_type);
775 } while (*next_type != 0);
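/* Example walk: an extension header whose length field is 1 spans 4
 * octets (length, two content octets, next type), so the offset advances
 * by gtp_exthdr->len * 4 and the next type octet sits at offset - 1.
 * The loop stops when that octet is 0 ("no such Header follows").
 */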
782 static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
784 unsigned int hdrlen = sizeof(struct udphdr) +
785 sizeof(struct gtp1_header);
786 struct gtp1_header *gtp1;
787 struct pdp_ctx *pctx;
790 if (!pskb_may_pull(skb, hdrlen))
793 gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
795 if ((gtp1->flags >> 5) != GTP_V1)
798 /* If the sockets were created in kernel, it means that
799 * there is no daemon running in userspace which would
800 * handle echo requests.
802 if (gtp1->type == GTP_ECHO_REQ && gtp->sk_created)
803 return gtp1u_send_echo_resp(gtp, skb);
805 if (gtp1->type == GTP_ECHO_RSP && gtp->sk_created)
806 return gtp1u_handle_echo_resp(gtp, skb);
808 if (gtp1->type != GTP_TPDU)
811 /* From 29.060: "This field shall be present if and only if any one or
812 * more of the S, PN and E flags are set.".
814 * If any of these bits is set, then the remaining ones also have to be
817 if (gtp1->flags & GTP1_F_MASK)
820 /* Make sure the header is large enough, including extensions. */
821 if (!pskb_may_pull(skb, hdrlen))
824 if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) {
825 netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n");
829 gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
831 pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid),
832 gtp_proto_to_family(inner_proto));
834 netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
838 if (gtp1->flags & GTP1_F_EXTHDR &&
839 gtp_parse_exthdrs(skb, &hdrlen) < 0)
842 return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto);
845 static void __gtp_encap_destroy(struct sock *sk)
850 gtp = sk->sk_user_data;
856 WRITE_ONCE(udp_sk(sk)->encap_type, 0);
857 rcu_assign_sk_user_data(sk, NULL);
865 static void gtp_encap_destroy(struct sock *sk)
868 __gtp_encap_destroy(sk);
872 static void gtp_encap_disable_sock(struct sock *sk)
877 __gtp_encap_destroy(sk);
880 static void gtp_encap_disable(struct gtp_dev *gtp)
882 if (gtp->sk_created) {
883 udp_tunnel_sock_release(gtp->sk0->sk_socket);
884 udp_tunnel_sock_release(gtp->sk1u->sk_socket);
885 gtp->sk_created = false;
889 gtp_encap_disable_sock(gtp->sk0);
890 gtp_encap_disable_sock(gtp->sk1u);
894 /* UDP encapsulation receive handler. See net/ipv4/udp.c.
895 * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
897 static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
902 gtp = rcu_dereference_sk_user_data(sk);
906 netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
908 switch (READ_ONCE(udp_sk(sk)->encap_type)) {
910 netdev_dbg(gtp->dev, "received GTP0 packet\n");
911 ret = gtp0_udp_encap_recv(gtp, skb);
913 case UDP_ENCAP_GTP1U:
914 netdev_dbg(gtp->dev, "received GTP1U packet\n");
915 ret = gtp1u_udp_encap_recv(gtp, skb);
918 ret = -1; /* Shouldn't happen. */
923 netdev_dbg(gtp->dev, "pass up to the process\n");
928 netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
937 static void gtp_dev_uninit(struct net_device *dev)
939 struct gtp_dev *gtp = netdev_priv(dev);
941 gtp_encap_disable(gtp);
944 static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
946 int payload_len = skb->len;
947 struct gtp0_header *gtp0;
949 gtp0 = skb_push(skb, sizeof(*gtp0));
951 gtp0->flags = 0x1e; /* v0, GTP-non-prime. */
952 gtp0->type = GTP_TPDU;
953 gtp0->length = htons(payload_len);
954 gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
955 gtp0->flow = htons(pctx->u.v0.flow);
957 gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0xff;
958 gtp0->tid = cpu_to_be64(pctx->u.v0.tid);
961 static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
963 int payload_len = skb->len;
964 struct gtp1_header *gtp1;
966 gtp1 = skb_push(skb, sizeof(*gtp1));
968 /* Bits 8 7 6 5 4 3 2 1
969 * +--+--+--+--+--+--+--+--+
970 * |version |PT| 0| E| S|PN|
971 * +--+--+--+--+--+--+--+--+
974 gtp1->flags = 0x30; /* v1, GTP-non-prime. */
975 gtp1->type = GTP_TPDU;
976 gtp1->length = htons(payload_len);
977 gtp1->tid = htonl(pctx->u.v1.o_tei);
979 /* TODO: Support for extension header, sequence number and N-PDU.
980 * Update the length field if any of them is available.
992 struct rt6_info *rt6;
994 struct pdp_ctx *pctx;
995 struct net_device *dev;
1000 static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
1002 switch (pktinfo->pctx->gtp_version) {
1004 pktinfo->gtph_port = htons(GTP0_PORT);
1005 gtp0_push_header(skb, pktinfo->pctx);
1008 pktinfo->gtph_port = htons(GTP1U_PORT);
1009 gtp1_push_header(skb, pktinfo->pctx);
1014 static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
1015 struct sock *sk, __u8 tos,
1016 struct pdp_ctx *pctx, struct rtable *rt,
1018 struct net_device *dev)
1022 pktinfo->pctx = pctx;
1024 pktinfo->fl4 = *fl4;
1028 static void gtp_set_pktinfo_ipv6(struct gtp_pktinfo *pktinfo,
1029 struct sock *sk, __u8 tos,
1030 struct pdp_ctx *pctx, struct rt6_info *rt6,
1032 struct net_device *dev)
1036 pktinfo->pctx = pctx;
1038 pktinfo->fl6 = *fl6;
1042 static int gtp_build_skb_outer_ip4(struct sk_buff *skb, struct net_device *dev,
1043 struct gtp_pktinfo *pktinfo,
1044 struct pdp_ctx *pctx, __u8 tos,
1052 rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer.addr.s_addr,
1053 inet_sk(pctx->sk)->inet_saddr);
1055 netdev_dbg(dev, "no route to SGSN %pI4\n",
1056 &pctx->peer.addr.s_addr);
1057 dev->stats.tx_carrier_errors++;
1061 if (rt->dst.dev == dev) {
1062 netdev_dbg(dev, "circular route to SGSN %pI4\n",
1063 &pctx->peer.addr.s_addr);
1064 dev->stats.collisions++;
1068 /* This is similar to tnl_update_pmtu(). */
1071 mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
1072 sizeof(struct iphdr) - sizeof(struct udphdr);
1073 switch (pctx->gtp_version) {
1075 mtu -= sizeof(struct gtp0_header);
1078 mtu -= sizeof(struct gtp1_header);
1082 mtu = dst_mtu(&rt->dst);
1085 skb_dst_update_pmtu_no_confirm(skb, mtu);
1087 if (frag_off & htons(IP_DF) &&
1088 ((!skb_is_gso(skb) && skb->len > mtu) ||
1089 (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
1090 netdev_dbg(dev, "packet too big, fragmentation needed\n");
1091 icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
1096 gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, tos, pctx, rt, &fl4, dev);
1097 gtp_push_header(skb, pktinfo);
1106 static int gtp_build_skb_outer_ip6(struct net *net, struct sk_buff *skb,
1107 struct net_device *dev,
1108 struct gtp_pktinfo *pktinfo,
1109 struct pdp_ctx *pctx, __u8 tos)
1111 struct dst_entry *dst;
1112 struct rt6_info *rt;
1116 rt = ip6_route_output_gtp(net, &fl6, pctx->sk, &pctx->peer.addr6,
1117 &inet6_sk(pctx->sk)->saddr);
1119 netdev_dbg(dev, "no route to SGSN %pI6\n",
1121 dev->stats.tx_carrier_errors++;
1126 if (rt->dst.dev == dev) {
1127 netdev_dbg(dev, "circular route to SGSN %pI6\n",
1129 dev->stats.collisions++;
1133 mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
1134 sizeof(struct ipv6hdr) - sizeof(struct udphdr);
1135 switch (pctx->gtp_version) {
1137 mtu -= sizeof(struct gtp0_header);
1140 mtu -= sizeof(struct gtp1_header);
1144 skb_dst_update_pmtu_no_confirm(skb, mtu);
1146 if ((!skb_is_gso(skb) && skb->len > mtu) ||
1147 (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))) {
1148 netdev_dbg(dev, "packet too big, fragmentation needed\n");
1149 icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1153 gtp_set_pktinfo_ipv6(pktinfo, pctx->sk, tos, pctx, rt, &fl6, dev);
1154 gtp_push_header(skb, pktinfo);
1163 static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
1164 struct gtp_pktinfo *pktinfo)
1166 struct gtp_dev *gtp = netdev_priv(dev);
1167 struct net *net = gtp->net;
1168 struct pdp_ctx *pctx;
1172 /* Read the IP destination address and resolve the PDP context.
1173 * Prepend PDP header with TEI/TID from PDP ctx.
1176 if (gtp->role == GTP_ROLE_SGSN)
1177 pctx = ipv4_pdp_find(gtp, iph->saddr);
1179 pctx = ipv4_pdp_find(gtp, iph->daddr);
1182 netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
1186 netdev_dbg(dev, "found PDP context %p\n", pctx);
1188 switch (pctx->sk->sk_family) {
1190 ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx,
1191 iph->tos, iph->frag_off);
1194 ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx,
1206 netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n",
1207 &iph->saddr, &iph->daddr);
1212 static int gtp_build_skb_ip6(struct sk_buff *skb, struct net_device *dev,
1213 struct gtp_pktinfo *pktinfo)
1215 struct gtp_dev *gtp = netdev_priv(dev);
1216 struct net *net = gtp->net;
1217 struct pdp_ctx *pctx;
1218 struct ipv6hdr *ip6h;
1222 /* Read the IP destination address and resolve the PDP context.
1223 * Prepend PDP header with TEI/TID from PDP ctx.
1225 ip6h = ipv6_hdr(skb);
1226 if (gtp->role == GTP_ROLE_SGSN)
1227 pctx = ipv6_pdp_find(gtp, &ip6h->saddr);
1229 pctx = ipv6_pdp_find(gtp, &ip6h->daddr);
1232 netdev_dbg(dev, "no PDP ctx found for %pI6, skip\n",
1236 netdev_dbg(dev, "found PDP context %p\n", pctx);
1238 tos = ipv6_get_dsfield(ip6h);
1240 switch (pctx->sk->sk_family) {
1242 ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx, tos, 0);
1245 ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx, tos);
1256 netdev_dbg(dev, "gtp -> IP src: %pI6 dst: %pI6\n",
1257 &ip6h->saddr, &ip6h->daddr);
1262 static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
1264 unsigned int proto = ntohs(skb->protocol);
1265 struct gtp_pktinfo pktinfo;
1268 /* Ensure there is sufficient headroom. */
1269 if (skb_cow_head(skb, dev->needed_headroom))
1272 if (!pskb_inet_may_pull(skb))
1275 skb_reset_inner_headers(skb);
1277 /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
1281 err = gtp_build_skb_ip4(skb, dev, &pktinfo);
1284 err = gtp_build_skb_ip6(skb, dev, &pktinfo);
1295 switch (pktinfo.pctx->sk->sk_family) {
1297 udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
1298 pktinfo.fl4.saddr, pktinfo.fl4.daddr,
1300 ip4_dst_hoplimit(&pktinfo.rt->dst),
1302 pktinfo.gtph_port, pktinfo.gtph_port,
1303 !net_eq(sock_net(pktinfo.pctx->sk),
1308 #if IS_ENABLED(CONFIG_IPV6)
1309 udp_tunnel6_xmit_skb(&pktinfo.rt6->dst, pktinfo.sk, skb, dev,
1310 &pktinfo.fl6.saddr, &pktinfo.fl6.daddr,
1312 ip6_dst_hoplimit(&pktinfo.rt->dst),
1314 pktinfo.gtph_port, pktinfo.gtph_port,
1322 return NETDEV_TX_OK;
1324 dev->stats.tx_errors++;
1326 return NETDEV_TX_OK;
1329 static const struct net_device_ops gtp_netdev_ops = {
1330 .ndo_uninit = gtp_dev_uninit,
1331 .ndo_start_xmit = gtp_dev_xmit,
1334 static const struct device_type gtp_type = {
1338 #define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header))
1339 #define GTP_IPV4_MAXLEN (sizeof(struct iphdr) + GTP_TH_MAXLEN)
1341 static void gtp_link_setup(struct net_device *dev)
1343 struct gtp_dev *gtp = netdev_priv(dev);
1345 dev->netdev_ops = &gtp_netdev_ops;
1346 dev->needs_free_netdev = true;
1347 SET_NETDEV_DEVTYPE(dev, &gtp_type);
1349 dev->hard_header_len = 0;
1351 dev->mtu = ETH_DATA_LEN - GTP_IPV4_MAXLEN;
1353 /* Zero header length. */
1354 dev->type = ARPHRD_NONE;
1355 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1357 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
1358 dev->priv_flags |= IFF_NO_QUEUE;
1359 dev->features |= NETIF_F_LLTX;
1360 netif_keep_dst(dev);
1362 dev->needed_headroom = LL_MAX_HEADER + GTP_IPV4_MAXLEN;
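/* MTU arithmetic for the default (IPv4 outer) case:
 * GTP_TH_MAXLEN = 8 (UDP) + 20 (GTPv0 header, the larger of the two),
 * GTP_IPV4_MAXLEN = 20 (IPv4) + 28 = 48, so dev->mtu = 1500 - 48 = 1452.
 * gtp_newlink() lowers this to 1500 - 68 = 1432 when the outer socket is
 * IPv6, since GTP_IPV6_MAXLEN accounts for the 40 byte IPv6 header.
 */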
1366 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
1367 static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
1369 static void gtp_destructor(struct net_device *dev)
1371 struct gtp_dev *gtp = netdev_priv(dev);
1373 kfree(gtp->addr_hash);
1374 kfree(gtp->tid_hash);
1377 static int gtp_sock_udp_config(struct udp_port_cfg *udp_conf,
1378 const struct nlattr *nla, int family)
1380 udp_conf->family = family;
1382 switch (udp_conf->family) {
1384 udp_conf->local_ip.s_addr = nla_get_be32(nla);
1386 #if IS_ENABLED(CONFIG_IPV6)
1388 udp_conf->local_ip6 = nla_get_in6_addr(nla);
1398 static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp,
1399 const struct nlattr *nla, int family)
1401 struct udp_tunnel_sock_cfg tuncfg = {};
1402 struct udp_port_cfg udp_conf = {};
1403 struct net *net = gtp->net;
1404 struct socket *sock;
1408 err = gtp_sock_udp_config(&udp_conf, nla, family);
1410 return ERR_PTR(err);
1412 udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
1413 udp_conf.family = AF_INET;
1416 if (type == UDP_ENCAP_GTP0)
1417 udp_conf.local_udp_port = htons(GTP0_PORT);
1418 else if (type == UDP_ENCAP_GTP1U)
1419 udp_conf.local_udp_port = htons(GTP1U_PORT);
1421 return ERR_PTR(-EINVAL);
1423 err = udp_sock_create(net, &udp_conf, &sock);
1425 return ERR_PTR(err);
1427 tuncfg.sk_user_data = gtp;
1428 tuncfg.encap_type = type;
1429 tuncfg.encap_rcv = gtp_encap_recv;
1430 tuncfg.encap_destroy = NULL;
1432 setup_udp_tunnel_sock(net, sock, &tuncfg);
1437 static int gtp_create_sockets(struct gtp_dev *gtp, const struct nlattr *nla,
1443 sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp, nla, family);
1445 return PTR_ERR(sk0);
1447 sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp, nla, family);
1449 udp_tunnel_sock_release(sk0->sk_socket);
1450 return PTR_ERR(sk1u);
1453 gtp->sk_created = true;
1460 #define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header))
1461 #define GTP_IPV6_MAXLEN (sizeof(struct ipv6hdr) + GTP_TH_MAXLEN)
1463 static int gtp_newlink(struct net *src_net, struct net_device *dev,
1464 struct nlattr *tb[], struct nlattr *data[],
1465 struct netlink_ext_ack *extack)
1467 unsigned int role = GTP_ROLE_GGSN;
1468 struct gtp_dev *gtp;
1472 #if !IS_ENABLED(CONFIG_IPV6)
1473 if (data[IFLA_GTP_LOCAL6])
1474 return -EAFNOSUPPORT;
1477 gtp = netdev_priv(dev);
1479 if (!data[IFLA_GTP_PDP_HASHSIZE]) {
1482 hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
1487 if (data[IFLA_GTP_ROLE]) {
1488 role = nla_get_u32(data[IFLA_GTP_ROLE]);
1489 if (role > GTP_ROLE_SGSN)
1494 if (!data[IFLA_GTP_RESTART_COUNT])
1495 gtp->restart_count = 0;
1497 gtp->restart_count = nla_get_u8(data[IFLA_GTP_RESTART_COUNT]);
1501 err = gtp_hashtable_new(gtp, hashsize);
1505 if (data[IFLA_GTP_CREATE_SOCKETS]) {
1506 if (data[IFLA_GTP_LOCAL6])
1507 err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL6], AF_INET6);
1509 err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL], AF_INET);
1511 err = gtp_encap_enable(gtp, data);
1517 if ((gtp->sk0 && gtp->sk0->sk_family == AF_INET6) ||
1518 (gtp->sk1u && gtp->sk1u->sk_family == AF_INET6)) {
1519 dev->mtu = ETH_DATA_LEN - GTP_IPV6_MAXLEN;
1520 dev->needed_headroom = LL_MAX_HEADER + GTP_IPV6_MAXLEN;
1523 err = register_netdevice(dev);
1525 netdev_dbg(dev, "failed to register new netdev %d\n", err);
1529 gn = net_generic(dev_net(dev), gtp_net_id);
1530 list_add_rcu(&gtp->list, &gn->gtp_dev_list);
1531 dev->priv_destructor = gtp_destructor;
1533 netdev_dbg(dev, "registered new GTP interface\n");
1538 gtp_encap_disable(gtp);
1540 kfree(gtp->addr_hash);
1541 kfree(gtp->tid_hash);
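/* Two ways to wire up the encap sockets: userspace (e.g. a GGSN daemon)
 * may pass already bound UDP sockets via IFLA_GTP_FD0/IFLA_GTP_FD1, or
 * it may set IFLA_GTP_CREATE_SOCKETS and let the kernel open them itself
 * (optionally bound to IFLA_GTP_LOCAL/IFLA_GTP_LOCAL6).  Only in the
 * latter case does the driver answer GTP echo requests on its own, since
 * no userspace daemon is listening on the sockets.
 */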
1545 static void gtp_dellink(struct net_device *dev, struct list_head *head)
1547 struct gtp_dev *gtp = netdev_priv(dev);
1548 struct hlist_node *next;
1549 struct pdp_ctx *pctx;
1552 for (i = 0; i < gtp->hash_size; i++)
1553 hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
1554 pdp_context_delete(pctx);
1556 list_del_rcu(&gtp->list);
1557 unregister_netdevice_queue(dev, head);
1560 static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
1561 [IFLA_GTP_FD0] = { .type = NLA_U32 },
1562 [IFLA_GTP_FD1] = { .type = NLA_U32 },
1563 [IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 },
1564 [IFLA_GTP_ROLE] = { .type = NLA_U32 },
1565 [IFLA_GTP_CREATE_SOCKETS] = { .type = NLA_U8 },
1566 [IFLA_GTP_RESTART_COUNT] = { .type = NLA_U8 },
1567 [IFLA_GTP_LOCAL] = { .type = NLA_U32 },
1568 [IFLA_GTP_LOCAL6] = { .len = sizeof(struct in6_addr) },
1571 static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
1572 struct netlink_ext_ack *extack)
1580 static size_t gtp_get_size(const struct net_device *dev)
1582 return nla_total_size(sizeof(__u32)) + /* IFLA_GTP_PDP_HASHSIZE */
1583 nla_total_size(sizeof(__u32)) + /* IFLA_GTP_ROLE */
1584 nla_total_size(sizeof(__u8)); /* IFLA_GTP_RESTART_COUNT */
1587 static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
1589 struct gtp_dev *gtp = netdev_priv(dev);
1591 if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
1592 goto nla_put_failure;
1593 if (nla_put_u32(skb, IFLA_GTP_ROLE, gtp->role))
1594 goto nla_put_failure;
1595 if (nla_put_u8(skb, IFLA_GTP_RESTART_COUNT, gtp->restart_count))
1596 goto nla_put_failure;
1604 static struct rtnl_link_ops gtp_link_ops __read_mostly = {
1606 .maxtype = IFLA_GTP_MAX,
1607 .policy = gtp_policy,
1608 .priv_size = sizeof(struct gtp_dev),
1609 .setup = gtp_link_setup,
1610 .validate = gtp_validate,
1611 .newlink = gtp_newlink,
1612 .dellink = gtp_dellink,
1613 .get_size = gtp_get_size,
1614 .fill_info = gtp_fill_info,
1617 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
1621 gtp->addr_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
1622 GFP_KERNEL | __GFP_NOWARN);
1623 if (gtp->addr_hash == NULL)
1626 gtp->tid_hash = kmalloc_array(hsize, sizeof(struct hlist_head),
1627 GFP_KERNEL | __GFP_NOWARN);
1628 if (gtp->tid_hash == NULL)
1631 gtp->hash_size = hsize;
1633 for (i = 0; i < hsize; i++) {
1634 INIT_HLIST_HEAD(&gtp->addr_hash[i]);
1635 INIT_HLIST_HEAD(&gtp->tid_hash[i]);
1639 kfree(gtp->addr_hash);
1643 static struct sock *gtp_encap_enable_socket(int fd, int type,
1644 struct gtp_dev *gtp)
1646 struct udp_tunnel_sock_cfg tuncfg = {NULL};
1647 struct socket *sock;
1651 pr_debug("enable gtp on %d, %d\n", fd, type);
1653 sock = sockfd_lookup(fd, &err);
1655 pr_debug("gtp socket fd=%d not found\n", fd);
1656 return ERR_PTR(err);
1660 if (sk->sk_protocol != IPPROTO_UDP ||
1661 sk->sk_type != SOCK_DGRAM ||
1662 (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
1663 pr_debug("socket fd=%d not UDP\n", fd);
1664 sk = ERR_PTR(-EINVAL);
1668 if (sk->sk_family == AF_INET6 &&
1670 sk = ERR_PTR(-EADDRNOTAVAIL);
1675 if (sk->sk_user_data) {
1676 sk = ERR_PTR(-EBUSY);
1682 tuncfg.sk_user_data = gtp;
1683 tuncfg.encap_type = type;
1684 tuncfg.encap_rcv = gtp_encap_recv;
1685 tuncfg.encap_destroy = gtp_encap_destroy;
1687 setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
1690 release_sock(sock->sk);
1696 static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
1698 struct sock *sk1u = NULL;
1699 struct sock *sk0 = NULL;
1701 if (!data[IFLA_GTP_FD0] && !data[IFLA_GTP_FD1])
1704 if (data[IFLA_GTP_FD0]) {
1705 u32 fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
1707 sk0 = gtp_encap_enable_socket(fd0, UDP_ENCAP_GTP0, gtp);
1709 return PTR_ERR(sk0);
1712 if (data[IFLA_GTP_FD1]) {
1713 u32 fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
1715 sk1u = gtp_encap_enable_socket(fd1, UDP_ENCAP_GTP1U, gtp);
1717 gtp_encap_disable_sock(sk0);
1718 return PTR_ERR(sk1u);
1726 sk0->sk_family != sk1u->sk_family) {
1727 gtp_encap_disable_sock(sk0);
1728 gtp_encap_disable_sock(sk1u);
1735 static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
1737 struct gtp_dev *gtp = NULL;
1738 struct net_device *dev;
1741 /* Examine the link attributes and figure out which network namespace
1742 * we are talking about.
1744 if (nla[GTPA_NET_NS_FD])
1745 net = get_net_ns_by_fd(nla_get_u32(nla[GTPA_NET_NS_FD]));
1747 net = get_net(src_net);
1752 /* Check if there's an existing gtpX device to configure */
1753 dev = dev_get_by_index_rcu(net, nla_get_u32(nla[GTPA_LINK]));
1754 if (dev && dev->netdev_ops == &gtp_netdev_ops)
1755 gtp = netdev_priv(dev);
1761 static void gtp_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
1763 pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
1765 switch (pctx->gtp_version) {
1767 /* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
1768 * label needs to be the same for uplink and downlink packets,
1769 * so let's annotate this.
1771 pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
1772 pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
1775 pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
1776 pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
1783 static void ip_pdp_peer_fill(struct pdp_ctx *pctx, struct genl_info *info)
1785 if (info->attrs[GTPA_PEER_ADDRESS]) {
1786 pctx->peer.addr.s_addr =
1787 nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
1788 } else if (info->attrs[GTPA_PEER_ADDR6]) {
1789 pctx->peer.addr6 = nla_get_in6_addr(info->attrs[GTPA_PEER_ADDR6]);
1793 static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
1795 ip_pdp_peer_fill(pctx, info);
1796 pctx->ms.addr.s_addr =
1797 nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
1798 gtp_pdp_fill(pctx, info);
1801 static bool ipv6_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
1803 ip_pdp_peer_fill(pctx, info);
1804 pctx->ms.addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]);
1805 if (pctx->ms.addr6.s6_addr32[2] ||
1806 pctx->ms.addr6.s6_addr32[3])
1809 gtp_pdp_fill(pctx, info);
1814 static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
1815 struct genl_info *info)
1817 struct pdp_ctx *pctx, *pctx_tid = NULL;
1818 struct net_device *dev = gtp->dev;
1819 u32 hash_ms, hash_tid = 0;
1820 struct in6_addr ms_addr6;
1821 unsigned int version;
1826 version = nla_get_u32(info->attrs[GTPA_VERSION]);
1828 if (info->attrs[GTPA_FAMILY])
1829 family = nla_get_u8(info->attrs[GTPA_FAMILY]);
1833 #if !IS_ENABLED(CONFIG_IPV6)
1834 if (family == AF_INET6)
1835 return ERR_PTR(-EAFNOSUPPORT);
1837 if (!info->attrs[GTPA_PEER_ADDRESS] &&
1838 !info->attrs[GTPA_PEER_ADDR6])
1839 return ERR_PTR(-EINVAL);
1841 if ((info->attrs[GTPA_PEER_ADDRESS] &&
1842 sk->sk_family == AF_INET6) ||
1843 (info->attrs[GTPA_PEER_ADDR6] &&
1844 sk->sk_family == AF_INET))
1845 return ERR_PTR(-EAFNOSUPPORT);
1849 if (!info->attrs[GTPA_MS_ADDRESS] ||
1850 info->attrs[GTPA_MS_ADDR6])
1851 return ERR_PTR(-EINVAL);
1853 ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
1854 hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
1855 pctx = ipv4_pdp_find(gtp, ms_addr);
1858 if (!info->attrs[GTPA_MS_ADDR6] ||
1859 info->attrs[GTPA_MS_ADDRESS])
1860 return ERR_PTR(-EINVAL);
1862 ms_addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]);
1863 hash_ms = ipv6_hashfn(&ms_addr6) % gtp->hash_size;
1864 pctx = ipv6_pdp_find(gtp, &ms_addr6);
1867 return ERR_PTR(-EAFNOSUPPORT);
1871 if (version == GTP_V0)
1872 pctx_tid = gtp0_pdp_find(gtp,
1873 nla_get_u64(info->attrs[GTPA_TID]),
1875 else if (version == GTP_V1)
1876 pctx_tid = gtp1_pdp_find(gtp,
1877 nla_get_u32(info->attrs[GTPA_I_TEI]),
1883 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
1884 return ERR_PTR(-EEXIST);
1885 if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
1886 return ERR_PTR(-EOPNOTSUPP);
1888 if (pctx && pctx_tid)
1889 return ERR_PTR(-EEXIST);
1895 ipv4_pdp_fill(pctx, info);
1898 if (!ipv6_pdp_fill(pctx, info))
1899 return ERR_PTR(-EADDRNOTAVAIL);
1903 if (pctx->gtp_version == GTP_V0)
1904 netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
1905 pctx->u.v0.tid, pctx);
1906 else if (pctx->gtp_version == GTP_V1)
1907 netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
1908 pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
1914 pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
1916 return ERR_PTR(-ENOMEM);
1920 pctx->dev = gtp->dev;
1925 if (!info->attrs[GTPA_MS_ADDRESS]) {
1928 return ERR_PTR(-EINVAL);
1931 ipv4_pdp_fill(pctx, info);
1934 if (!info->attrs[GTPA_MS_ADDR6]) {
1937 return ERR_PTR(-EINVAL);
1940 if (!ipv6_pdp_fill(pctx, info)) {
1943 return ERR_PTR(-EADDRNOTAVAIL);
1947 atomic_set(&pctx->tx_seq, 0);
1949 switch (pctx->gtp_version) {
1951 /* TS 09.60: "The flow label identifies unambiguously a GTP
1952 * flow.". We use the tid for this instead, I cannot find a
1953 * situation in which this doesn't unambiguously identify the
1956 hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
1959 hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
1963 hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
1964 hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);
1966 switch (pctx->gtp_version) {
1968 netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
1969 pctx->u.v0.tid, &pctx->peer.addr,
1970 &pctx->ms.addr, pctx);
1973 netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
1974 pctx->u.v1.i_tei, pctx->u.v1.o_tei,
1975 &pctx->peer.addr, &pctx->ms.addr, pctx);
1982 static void pdp_context_free(struct rcu_head *head)
1984 struct pdp_ctx *pctx = container_of(head, struct pdp_ctx, rcu_head);
1990 static void pdp_context_delete(struct pdp_ctx *pctx)
1992 hlist_del_rcu(&pctx->hlist_tid);
1993 hlist_del_rcu(&pctx->hlist_addr);
1994 call_rcu(&pctx->rcu_head, pdp_context_free);
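/* Contexts are unlinked from both hash chains and freed only after an
 * RCU grace period, so the lockless lookups in the RX and TX paths
 * (which run under rcu_read_lock) never see a freed pdp_ctx.
 */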
1997 static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation);
1999 static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
2001 unsigned int version;
2002 struct pdp_ctx *pctx;
2003 struct gtp_dev *gtp;
2007 if (!info->attrs[GTPA_VERSION] ||
2008 !info->attrs[GTPA_LINK])
2011 version = nla_get_u32(info->attrs[GTPA_VERSION]);
2015 if (!info->attrs[GTPA_TID] ||
2016 !info->attrs[GTPA_FLOW])
2020 if (!info->attrs[GTPA_I_TEI] ||
2021 !info->attrs[GTPA_O_TEI])
2031 gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
2037 if (version == GTP_V0)
2039 else if (version == GTP_V1)
2049 pctx = gtp_pdp_add(gtp, sk, info);
2051 err = PTR_ERR(pctx);
2053 gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL);
2062 static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
2063 struct nlattr *nla[])
2065 struct gtp_dev *gtp;
2068 if (nla[GTPA_FAMILY])
2069 family = nla_get_u8(nla[GTPA_FAMILY]);
2073 gtp = gtp_find_dev(net, nla);
2075 return ERR_PTR(-ENODEV);
2077 if (nla[GTPA_MS_ADDRESS]) {
2078 __be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);
2080 if (family != AF_INET)
2081 return ERR_PTR(-EINVAL);
2083 return ipv4_pdp_find(gtp, ip);
2084 } else if (nla[GTPA_MS_ADDR6]) {
2085 struct in6_addr addr = nla_get_in6_addr(nla[GTPA_MS_ADDR6]);
2087 if (family != AF_INET6)
2088 return ERR_PTR(-EINVAL);
2090 if (addr.s6_addr32[2] ||
2092 return ERR_PTR(-EADDRNOTAVAIL);
2094 return ipv6_pdp_find(gtp, &addr);
2095 } else if (nla[GTPA_VERSION]) {
2096 u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);
2098 if (gtp_version == GTP_V0 && nla[GTPA_TID]) {
2099 return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]),
2101 } else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI]) {
2102 return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]),
2107 return ERR_PTR(-EINVAL);
2110 static struct pdp_ctx *gtp_find_pdp(struct net *net, struct nlattr *nla[])
2112 struct pdp_ctx *pctx;
2115 pctx = gtp_find_pdp_by_link(net, nla);
2117 pctx = ERR_PTR(-EINVAL);
2120 pctx = ERR_PTR(-ENOENT);
2125 static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
2127 struct pdp_ctx *pctx;
2130 if (!info->attrs[GTPA_VERSION])
2135 pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
2137 err = PTR_ERR(pctx);
2141 if (pctx->gtp_version == GTP_V0)
2142 netdev_dbg(pctx->dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
2143 pctx->u.v0.tid, pctx);
2144 else if (pctx->gtp_version == GTP_V1)
2145 netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
2146 pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
2148 gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC);
2149 pdp_context_delete(pctx);
2156 static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
2157 int flags, u32 type, struct pdp_ctx *pctx)
2161 genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
2166 if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
2167 nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
2168 nla_put_u8(skb, GTPA_FAMILY, pctx->af))
2169 goto nla_put_failure;
2173 if (nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms.addr.s_addr))
2174 goto nla_put_failure;
2177 if (nla_put_in6_addr(skb, GTPA_MS_ADDR6, &pctx->ms.addr6))
2178 goto nla_put_failure;
2182 switch (pctx->sk->sk_family) {
2184 if (nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer.addr.s_addr))
2185 goto nla_put_failure;
2188 if (nla_put_in6_addr(skb, GTPA_PEER_ADDR6, &pctx->peer.addr6))
2189 goto nla_put_failure;
2193 switch (pctx->gtp_version) {
2195 if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
2196 nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
2197 goto nla_put_failure;
2200 if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
2201 nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
2202 goto nla_put_failure;
2205 genlmsg_end(skb, genlh);
2210 genlmsg_cancel(skb, genlh);
2214 static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation)
2216 struct sk_buff *msg;
2219 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation);
2223 ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx);
2229 ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg,
2230 0, GTP_GENL_MCGRP, GFP_ATOMIC);
2234 static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
2236 struct pdp_ctx *pctx = NULL;
2237 struct sk_buff *skb2;
2240 if (!info->attrs[GTPA_VERSION])
2245 pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
2247 err = PTR_ERR(pctx);
2251 skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
2257 err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
2258 0, info->nlhdr->nlmsg_type, pctx);
2260 goto err_unlock_free;
2263 return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);
2272 static int gtp_genl_dump_pdp(struct sk_buff *skb,
2273 struct netlink_callback *cb)
2275 struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
2276 int i, j, bucket = cb->args[0], skip = cb->args[1];
2277 struct net *net = sock_net(skb->sk);
2278 struct pdp_ctx *pctx;
2281 gn = net_generic(net, gtp_net_id);
2287 list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
2288 if (last_gtp && last_gtp != gtp)
2293 for (i = bucket; i < gtp->hash_size; i++) {
2295 hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
2298 gtp_genl_fill_info(skb,
2299 NETLINK_CB(cb->skb).portid,
2302 cb->nlh->nlmsg_type, pctx)) {
2305 cb->args[2] = (unsigned long)gtp;
2320 static int gtp_genl_send_echo_req(struct sk_buff *skb, struct genl_info *info)
2322 struct sk_buff *skb_to_send;
2323 __be32 src_ip, dst_ip;
2324 unsigned int version;
2325 struct gtp_dev *gtp;
2332 if (!info->attrs[GTPA_VERSION] ||
2333 !info->attrs[GTPA_LINK] ||
2334 !info->attrs[GTPA_PEER_ADDRESS] ||
2335 !info->attrs[GTPA_MS_ADDRESS])
2338 version = nla_get_u32(info->attrs[GTPA_VERSION]);
2339 dst_ip = nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
2340 src_ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
2342 gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
2346 if (!gtp->sk_created)
2348 if (!(gtp->dev->flags & IFF_UP))
2351 if (version == GTP_V0) {
2352 struct gtp0_header *gtp0_h;
2354 len = LL_RESERVED_SPACE(gtp->dev) + sizeof(struct gtp0_header) +
2355 sizeof(struct iphdr) + sizeof(struct udphdr);
2357 skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
2362 port = htons(GTP0_PORT);
2364 gtp0_h = skb_push(skb_to_send, sizeof(struct gtp0_header));
2365 memset(gtp0_h, 0, sizeof(struct gtp0_header));
2366 gtp0_build_echo_msg(gtp0_h, GTP_ECHO_REQ);
2367 } else if (version == GTP_V1) {
2368 struct gtp1_header_long *gtp1u_h;
2370 len = LL_RESERVED_SPACE(gtp->dev) +
2371 sizeof(struct gtp1_header_long) +
2372 sizeof(struct iphdr) + sizeof(struct udphdr);
2374 skb_to_send = netdev_alloc_skb_ip_align(gtp->dev, len);
2379 port = htons(GTP1U_PORT);
2381 gtp1u_h = skb_push(skb_to_send,
2382 sizeof(struct gtp1_header_long));
2383 memset(gtp1u_h, 0, sizeof(struct gtp1_header_long));
2384 gtp1u_build_echo_msg(gtp1u_h, GTP_ECHO_REQ);
2389 rt = ip4_route_output_gtp(&fl4, sk, dst_ip, src_ip);
2391 netdev_dbg(gtp->dev, "no route for echo request to %pI4\n",
2393 kfree_skb(skb_to_send);
2397 udp_tunnel_xmit_skb(rt, sk, skb_to_send,
2398 fl4.saddr, fl4.daddr,
2400 ip4_dst_hoplimit(&rt->dst),
2403 !net_eq(sock_net(sk),
2409 static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
2410 [GTPA_LINK] = { .type = NLA_U32, },
2411 [GTPA_VERSION] = { .type = NLA_U32, },
2412 [GTPA_TID] = { .type = NLA_U64, },
2413 [GTPA_PEER_ADDRESS] = { .type = NLA_U32, },
2414 [GTPA_MS_ADDRESS] = { .type = NLA_U32, },
2415 [GTPA_FLOW] = { .type = NLA_U16, },
2416 [GTPA_NET_NS_FD] = { .type = NLA_U32, },
2417 [GTPA_I_TEI] = { .type = NLA_U32, },
2418 [GTPA_O_TEI] = { .type = NLA_U32, },
2419 [GTPA_PEER_ADDR6] = { .len = sizeof(struct in6_addr), },
2420 [GTPA_MS_ADDR6] = { .len = sizeof(struct in6_addr), },
2421 [GTPA_FAMILY] = { .type = NLA_U8, },
2424 static const struct genl_small_ops gtp_genl_ops[] = {
2426 .cmd = GTP_CMD_NEWPDP,
2427 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2428 .doit = gtp_genl_new_pdp,
2429 .flags = GENL_ADMIN_PERM,
2432 .cmd = GTP_CMD_DELPDP,
2433 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2434 .doit = gtp_genl_del_pdp,
2435 .flags = GENL_ADMIN_PERM,
2438 .cmd = GTP_CMD_GETPDP,
2439 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2440 .doit = gtp_genl_get_pdp,
2441 .dumpit = gtp_genl_dump_pdp,
2442 .flags = GENL_ADMIN_PERM,
2445 .cmd = GTP_CMD_ECHOREQ,
2446 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2447 .doit = gtp_genl_send_echo_req,
2448 .flags = GENL_ADMIN_PERM,
2452 static struct genl_family gtp_genl_family __ro_after_init = {
2456 .maxattr = GTPA_MAX,
2457 .policy = gtp_genl_policy,
2459 .module = THIS_MODULE,
2460 .small_ops = gtp_genl_ops,
2461 .n_small_ops = ARRAY_SIZE(gtp_genl_ops),
2462 .resv_start_op = GTP_CMD_ECHOREQ + 1,
2463 .mcgrps = gtp_genl_mcgrps,
2464 .n_mcgrps = ARRAY_SIZE(gtp_genl_mcgrps),
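/* Sketch of a GTP_CMD_NEWPDP request for a GTPv1-U context (the numeric
 * values below are only example values):
 *
 *   GTPA_VERSION      = GTP_V1
 *   GTPA_LINK         = <ifindex of the gtp device>
 *   GTPA_I_TEI        = 100              incoming TEID
 *   GTPA_O_TEI        = 200              outgoing TEID
 *   GTPA_PEER_ADDRESS = <SGSN/peer IPv4 address>
 *   GTPA_MS_ADDRESS   = <subscriber IPv4 address>
 *
 * For GTPv0, GTPA_TID and GTPA_FLOW replace the two TEI attributes;
 * IPv6 subscribers and peers use GTPA_MS_ADDR6/GTPA_PEER_ADDR6 instead.
 */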
2467 static int __net_init gtp_net_init(struct net *net)
2469 struct gtp_net *gn = net_generic(net, gtp_net_id);
2471 INIT_LIST_HEAD(&gn->gtp_dev_list);
2475 static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
2476 struct list_head *dev_to_kill)
2480 list_for_each_entry(net, net_list, exit_list) {
2481 struct gtp_net *gn = net_generic(net, gtp_net_id);
2482 struct gtp_dev *gtp;
2484 list_for_each_entry(gtp, &gn->gtp_dev_list, list)
2485 gtp_dellink(gtp->dev, dev_to_kill);
2489 static struct pernet_operations gtp_net_ops = {
2490 .init = gtp_net_init,
2491 .exit_batch_rtnl = gtp_net_exit_batch_rtnl,
2493 .size = sizeof(struct gtp_net),
2496 static int __init gtp_init(void)
2500 get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
2502 err = register_pernet_subsys(&gtp_net_ops);
2506 err = rtnl_link_register(&gtp_link_ops);
2508 goto unreg_pernet_subsys;
2510 err = genl_register_family(&gtp_genl_family);
2512 goto unreg_rtnl_link;
2514 pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
2515 sizeof(struct pdp_ctx));
2519 rtnl_link_unregister(&gtp_link_ops);
2520 unreg_pernet_subsys:
2521 unregister_pernet_subsys(&gtp_net_ops);
2523 pr_err("error loading GTP module\n");
2526 late_initcall(gtp_init);
2528 static void __exit gtp_fini(void)
2530 genl_unregister_family(&gtp_genl_family);
2531 rtnl_link_unregister(&gtp_link_ops);
2532 unregister_pernet_subsys(&gtp_net_ops);
2534 pr_info("GTP module unloaded\n");
2536 module_exit(gtp_fini);
2538 MODULE_LICENSE("GPL");
2540 MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
2541 MODULE_ALIAS_RTNL_LINK("gtp");
2542 MODULE_ALIAS_GENL_FAMILY("gtp");