/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
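/* Usage note (hedged, userspace sketch rather than kernel code): the
 * IPV6_V6ONLY behaviour mentioned above is controlled per socket, e.g.:
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
 *
 * With the option off (the default unless net.ipv6.bindv6only is set),
 * an AF_INET6 listener also accepts IPv4 peers as v4-mapped addresses.
 */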
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>
static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
				ipv6_hdr(skb)->saddr.s6_addr32,
				tcp_hdr(skb)->dest,
				tcp_hdr(skb)->source);
}
static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
				   ipv6_hdr(skb)->saddr.s6_addr32);
}
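/* Both helpers above mix the connection's addresses (plus the ports, for
 * the ISN) with a per-boot secret, giving RFC 6528-style initial sequence
 * number randomization and a matching per-destination timestamp offset.
 */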
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;

			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
							 sk->sk_v6_daddr.s6_addr32,
							 inet->inet_sport,
							 inet->inet_dport);
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
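/* Userspace sketch (hedged, not part of this file): connecting over IPv6,
 * including the sin6_scope_id that the link-local branch above requires:
 *
 *	struct sockaddr_in6 sa = { .sin6_family = AF_INET6 };
 *
 *	sa.sin6_port = htons(443);
 *	inet_pton(AF_INET6, "fe80::1", &sa.sin6_addr);
 *	sa.sin6_scope_id = if_nametoindex("eth0");  // required for fe80::/10
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 */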
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be the ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always <576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
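/* Usage note (hedged, userspace sketch): the min_hopcount check above
 * implements the generalized TTL security mechanism (RFC 5082); it is
 * armed from userspace, e.g.:
 *
 *	int hops = 255;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_MINHOPCOUNT, &hops, sizeof(hops));
 */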
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}
static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 prefixlen;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, prefixlen, cmd.tcpm_key,
				      cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, prefixlen, cmd.tcpm_key,
			      cmd.tcpm_keylen, GFP_KERNEL);
}
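/* Usage note (hedged, userspace sketch): the option parsed above is set on
 * a TCP socket roughly like this:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */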
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
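/* For reference (hedged sketch of the declaration in net/tcp.h): the
 * pseudo-header hashed above mirrors the RFC 2460 checksum pseudo-header:
 *
 *	struct tcp6_pseudohdr {
 *		struct in6_addr	saddr;
 *		struct in6_addr	daddr;
 *		__be32		len;
 *		__be32		protocol;	// includes padding
 *	};
 */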
static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);

		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
#endif
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass the control socket to ip6_dst_lookup_flow; the underlying
	 * function uses it to retrieve the network namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
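/* Note: replies built here (RSTs and timewait/request ACKs) are sent via
 * the per-netns control socket net->ipv6.tcp_sk rather than the flow's own
 * socket, since the latter may be gone or may only be a minimal socket.
 */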
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif = 0;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/* The active side is gone. Try to find the listening socket
		 * via the source port, and then find the MD5 key through it.
		 * We do not lose security here: the incoming packet is
		 * verified against the key we find, and no RST is generated
		 * if the MD5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), tcp_v6_iif(skb),
					    tcp_v6_sdif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	if (sk) {
		oif = sk->sk_bound_dev_if;
		trace_tcp_send_reset(sk, skb);
	}

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}
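/* Worked example of the RFC 7323 shift above: with rcv_wscale = 7, a
 * 262144-byte receive window is advertised as 262144 >> 7 = 2048.
 */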
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, 128, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb));
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;

ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
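/* Usage note (hedged, userspace sketch): the options latched above surface
 * through getsockopt(IPV6_PKTOPTIONS) or, more conventionally, as ancillary
 * data on recvmsg() after enabling the relevant option, e.g.:
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
 */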
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}
static int tcp_v6_rcv(struct sk_buff *skb)
{
	int sdif = inet6_sdif(skb);
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb), sdif,
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb))
			nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v6_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb),
					    sdif);
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);

			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		/* fall through */
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb), inet6_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
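/* Note: this fast path can be toggled at runtime via the
 * net.ipv4.tcp_early_demux sysctl (shared by IPv4 and IPv6), which is why
 * tcpv6_protocol below is deliberately not const.
 */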
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = sk_state_load(sp);
	if (state == TCP_LISTEN)
		rx_queue = sp->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   tp->write_seq - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   refcount_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   refcount_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait6_sock(seq, v, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq6(seq, v, st->num);
	else
		get_tcp6_sock(seq, v, st->num);
out:
	return 0;
}
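/* Usage note: the table rendered above is read from userspace as plain
 * text, e.g. with "cat /proc/net/tcp6"; addresses and ports are printed
 * in hexadecimal, one %08X group per 32-bit word of the address.
 */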
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
/* thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.early_demux_handler =  tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}