/*
 *	Linux INET6 implementation
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support the IPV6_V6ONLY socket option,
 *	Alexey Kuznetsov		which allows both IPv4 and IPv6 sockets to
 *					bind to a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
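
/* Cache the validated input route (and its cookie) on the socket so the
 * established-state receive fast path can skip a routing lookup.
 */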
91 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
93 struct dst_entry *dst = skb_dst(skb);
95 if (dst && dst_hold_safe(dst)) {
96 const struct rt6_info *rt = (const struct rt6_info *)dst;
99 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
100 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
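
/* Pick the initial sequence number for a connection from a keyed hash of
 * the address/port 4-tuple.
 */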
104 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
106 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
107 ipv6_hdr(skb)->saddr.s6_addr32,
109 tcp_hdr(skb)->source);
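
/* Active open (connect()): validate the destination, hand v4-mapped
 * addresses off to the IPv4 code, resolve the route and source address,
 * then pick an ISN and send the initial SYN.
 */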
112 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
115 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
116 struct inet_sock *inet = inet_sk(sk);
117 struct inet_connection_sock *icsk = inet_csk(sk);
118 struct ipv6_pinfo *np = inet6_sk(sk);
119 struct tcp_sock *tp = tcp_sk(sk);
120 struct in6_addr *saddr = NULL, *final_p, final;
121 struct ipv6_txoptions *opt;
123 struct dst_entry *dst;
127 if (addr_len < SIN6_LEN_RFC2133)
130 if (usin->sin6_family != AF_INET6)
131 return -EAFNOSUPPORT;
133 memset(&fl6, 0, sizeof(fl6));
136 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
137 IP6_ECN_flow_init(fl6.flowlabel);
138 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
139 struct ip6_flowlabel *flowlabel;
140 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
143 fl6_sock_release(flowlabel);
148 * connect() to INADDR_ANY means loopback (BSD'ism).
151 if (ipv6_addr_any(&usin->sin6_addr))
152 usin->sin6_addr.s6_addr[15] = 0x1;
154 addr_type = ipv6_addr_type(&usin->sin6_addr);
156 if (addr_type & IPV6_ADDR_MULTICAST)
159 if (addr_type&IPV6_ADDR_LINKLOCAL) {
160 if (addr_len >= sizeof(struct sockaddr_in6) &&
161 usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
165 if (sk->sk_bound_dev_if &&
166 sk->sk_bound_dev_if != usin->sin6_scope_id)
169 sk->sk_bound_dev_if = usin->sin6_scope_id;
		/* Connecting to a link-local address requires an interface. */
173 if (!sk->sk_bound_dev_if)
177 if (tp->rx_opt.ts_recent_stamp &&
178 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
179 tp->rx_opt.ts_recent = 0;
180 tp->rx_opt.ts_recent_stamp = 0;
184 sk->sk_v6_daddr = usin->sin6_addr;
185 np->flow_label = fl6.flowlabel;
191 if (addr_type == IPV6_ADDR_MAPPED) {
192 u32 exthdrlen = icsk->icsk_ext_hdr_len;
193 struct sockaddr_in sin;
195 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
197 if (__ipv6_only_sock(sk))
200 sin.sin_family = AF_INET;
201 sin.sin_port = usin->sin6_port;
202 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
204 icsk->icsk_af_ops = &ipv6_mapped;
205 sk->sk_backlog_rcv = tcp_v4_do_rcv;
206 #ifdef CONFIG_TCP_MD5SIG
207 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
210 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
213 icsk->icsk_ext_hdr_len = exthdrlen;
214 icsk->icsk_af_ops = &ipv6_specific;
215 sk->sk_backlog_rcv = tcp_v6_do_rcv;
216 #ifdef CONFIG_TCP_MD5SIG
217 tp->af_specific = &tcp_sock_ipv6_specific;
221 np->saddr = sk->sk_v6_rcv_saddr;
226 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
227 saddr = &sk->sk_v6_rcv_saddr;
229 fl6.flowi6_proto = IPPROTO_TCP;
230 fl6.daddr = sk->sk_v6_daddr;
231 fl6.saddr = saddr ? *saddr : np->saddr;
232 fl6.flowi6_oif = sk->sk_bound_dev_if;
233 fl6.flowi6_mark = sk->sk_mark;
234 fl6.fl6_dport = usin->sin6_port;
235 fl6.fl6_sport = inet->inet_sport;
236 fl6.flowi6_uid = sk->sk_uid;
238 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
239 final_p = fl6_update_dst(&fl6, opt, &final);
241 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
243 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
251 sk->sk_v6_rcv_saddr = *saddr;
254 /* set the source address */
256 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
258 sk->sk_gso_type = SKB_GSO_TCPV6;
259 ip6_dst_store(sk, dst, NULL, NULL);
261 if (tcp_death_row.sysctl_tw_recycle &&
262 !tp->rx_opt.ts_recent_stamp &&
263 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
264 tcp_fetch_timewait_stamp(sk, dst);
266 icsk->icsk_ext_hdr_len = 0;
268 icsk->icsk_ext_hdr_len = opt->opt_flen +
271 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
273 inet->inet_dport = usin->sin6_port;
275 tcp_set_state(sk, TCP_SYN_SENT);
276 err = inet6_hash_connect(&tcp_death_row, sk);
282 if (!tp->write_seq && likely(!tp->repair))
283 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
284 sk->sk_v6_daddr.s6_addr32,
288 err = tcp_connect(sk);
295 tcp_set_state(sk, TCP_CLOSE);
298 inet->inet_dport = 0;
299 sk->sk_route_caps = 0;
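
/* Handle a (possibly deferred) path-MTU update: refresh the cached route
 * and, if the new path MTU is below the value we have been using, shrink
 * the MSS and retransmit what no longer fits.
 */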
303 static void tcp_v6_mtu_reduced(struct sock *sk)
305 struct dst_entry *dst;
307 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
310 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
314 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
315 tcp_sync_mss(sk, dst_mtu(dst));
316 tcp_simple_retransmit(sk);
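
/* ICMPv6 error handler: find the socket the error refers to and either act
 * on it right away (PMTU, redirect, hard errors) or record it for the owner
 * to process once the socket is no longer locked by user context.
 */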
320 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
321 u8 type, u8 code, int offset, __be32 info)
323 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
324 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
325 struct net *net = dev_net(skb->dev);
326 struct request_sock *fastopen;
327 struct ipv6_pinfo *np;
334 sk = __inet6_lookup_established(net, &tcp_hashinfo,
335 &hdr->daddr, th->dest,
336 &hdr->saddr, ntohs(th->source),
340 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
345 if (sk->sk_state == TCP_TIME_WAIT) {
346 inet_twsk_put(inet_twsk(sk));
349 seq = ntohl(th->seq);
350 fatal = icmpv6_err_convert(type, code, &err);
351 if (sk->sk_state == TCP_NEW_SYN_RECV)
352 return tcp_req_err(sk, seq, fatal);
355 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
356 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
358 if (sk->sk_state == TCP_CLOSE)
361 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
362 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
	/* XXX (TFO) - tp->snd_una should be the ISN (see tcp_create_openreq_child()). */
368 fastopen = tp->fastopen_rsk;
369 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
370 if (sk->sk_state != TCP_LISTEN &&
371 !between(seq, snd_una, tp->snd_nxt)) {
372 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
378 if (type == NDISC_REDIRECT) {
379 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
382 dst->ops->redirect(dst, sk, skb);
386 if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
391 if (sk->sk_state == TCP_LISTEN)
394 if (!ip6_sk_accept_pmtu(sk))
397 tp->mtu_info = ntohl(info);
398 if (!sock_owned_by_user(sk))
399 tcp_v6_mtu_reduced(sk);
400 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
	/* Might be for a request_sock */
408 switch (sk->sk_state) {
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted, it is treated as a connected one below.
		 */
414 if (fastopen && !fastopen->sk)
417 if (!sock_owned_by_user(sk)) {
419 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
423 sk->sk_err_soft = err;
427 if (!sock_owned_by_user(sk) && np->recverr) {
429 sk->sk_error_report(sk);
431 sk->sk_err_soft = err;
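
/* Route and transmit a SYN-ACK for the given request sock, using the
 * listener's flow and IPv6 tx options.
 */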
439 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
441 struct request_sock *req,
442 struct tcp_fastopen_cookie *foc,
443 enum tcp_synack_type synack_type)
445 struct inet_request_sock *ireq = inet_rsk(req);
446 struct ipv6_pinfo *np = inet6_sk(sk);
447 struct ipv6_txoptions *opt;
448 struct flowi6 *fl6 = &fl->u.ip6;
452 /* First, grab a route. */
453 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
454 IPPROTO_TCP)) == NULL)
457 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
460 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
461 &ireq->ir_v6_rmt_addr);
463 fl6->daddr = ireq->ir_v6_rmt_addr;
464 if (np->repflow && ireq->pktopts)
465 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
468 opt = ireq->ipv6_opt;
470 opt = rcu_dereference(np->opt);
471 err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
473 err = net_xmit_eval(err);
481 static void tcp_v6_reqsk_destructor(struct request_sock *req)
483 kfree(inet_rsk(req)->ipv6_opt);
484 kfree_skb(inet_rsk(req)->pktopts);
487 #ifdef CONFIG_TCP_MD5SIG
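/* RFC 2385 TCP MD5 signature support: key lookup by peer address,
 * TCP_MD5SIG setsockopt() parsing, and hashing of the pseudo-header
 * plus TCP segment.
 */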
488 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
489 const struct in6_addr *addr)
491 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
494 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
495 const struct sock *addr_sk)
497 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
500 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
503 struct tcp_md5sig cmd;
504 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
506 if (optlen < sizeof(cmd))
509 if (copy_from_user(&cmd, optval, sizeof(cmd)))
512 if (sin6->sin6_family != AF_INET6)
515 if (!cmd.tcpm_keylen) {
516 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
517 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
519 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
523 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
526 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
527 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
528 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
530 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
531 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
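
/* Feed the IPv6 pseudo-header (addresses, length, protocol) and a copy of
 * the TCP header with its checksum field zeroed into the MD5 transform.
 */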
534 static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
535 const struct in6_addr *daddr,
536 const struct in6_addr *saddr,
537 const struct tcphdr *th, int nbytes)
539 struct tcp6_pseudohdr *bp;
540 struct scatterlist sg;
544 /* 1. TCP pseudo-header (RFC2460) */
547 bp->protocol = cpu_to_be32(IPPROTO_TCP);
548 bp->len = cpu_to_be32(nbytes);
550 _th = (struct tcphdr *)(bp + 1);
551 memcpy(_th, th, sizeof(*th));
554 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
555 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
556 sizeof(*bp) + sizeof(*th));
557 return crypto_ahash_update(hp->md5_req);
560 static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
561 const struct in6_addr *daddr, struct in6_addr *saddr,
562 const struct tcphdr *th)
564 struct tcp_md5sig_pool *hp;
565 struct ahash_request *req;
567 hp = tcp_get_md5sig_pool();
569 goto clear_hash_noput;
572 if (crypto_ahash_init(req))
574 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
576 if (tcp_md5_hash_key(hp, key))
578 ahash_request_set_crypt(req, NULL, md5_hash, 0);
579 if (crypto_ahash_final(req))
582 tcp_put_md5sig_pool();
586 tcp_put_md5sig_pool();
588 memset(md5_hash, 0, 16);
592 static int tcp_v6_md5_hash_skb(char *md5_hash,
593 const struct tcp_md5sig_key *key,
594 const struct sock *sk,
595 const struct sk_buff *skb)
597 const struct in6_addr *saddr, *daddr;
598 struct tcp_md5sig_pool *hp;
599 struct ahash_request *req;
600 const struct tcphdr *th = tcp_hdr(skb);
602 if (sk) { /* valid for establish/request sockets */
603 saddr = &sk->sk_v6_rcv_saddr;
604 daddr = &sk->sk_v6_daddr;
606 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
607 saddr = &ip6h->saddr;
608 daddr = &ip6h->daddr;
611 hp = tcp_get_md5sig_pool();
613 goto clear_hash_noput;
616 if (crypto_ahash_init(req))
619 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
621 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
623 if (tcp_md5_hash_key(hp, key))
625 ahash_request_set_crypt(req, NULL, md5_hash, 0);
626 if (crypto_ahash_final(req))
629 tcp_put_md5sig_pool();
633 tcp_put_md5sig_pool();
635 memset(md5_hash, 0, 16);
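
/* Verify the MD5 signature option on an incoming segment against the key
 * configured for the peer; a missing, unexpected or mismatching signature
 * makes the caller drop the segment and bumps the matching MIB counter.
 */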
641 static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
642 const struct sk_buff *skb)
644 #ifdef CONFIG_TCP_MD5SIG
645 const __u8 *hash_location = NULL;
646 struct tcp_md5sig_key *hash_expected;
647 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
648 const struct tcphdr *th = tcp_hdr(skb);
652 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
653 hash_location = tcp_parse_md5sig_option(th);
655 /* We've parsed the options - do we have a hash? */
656 if (!hash_expected && !hash_location)
659 if (hash_expected && !hash_location) {
660 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
664 if (!hash_expected && hash_location) {
665 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
669 /* check the signature */
670 genhash = tcp_v6_md5_hash_skb(newhash,
674 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
675 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
676 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
677 genhash ? "failed" : "mismatch",
678 &ip6h->saddr, ntohs(th->source),
679 &ip6h->daddr, ntohs(th->dest));
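
/* Fill in the IPv6-specific fields of a freshly allocated request sock
 * from the incoming SYN.
 */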
686 static void tcp_v6_init_req(struct request_sock *req,
687 const struct sock *sk_listener,
690 struct inet_request_sock *ireq = inet_rsk(req);
691 const struct ipv6_pinfo *np = inet6_sk(sk_listener);
693 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
694 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
696 /* So that link locals have meaning */
697 if (!sk_listener->sk_bound_dev_if &&
698 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
699 ireq->ir_iif = tcp_v6_iif(skb);
701 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
702 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
703 np->rxopt.bits.rxinfo ||
704 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
705 np->rxopt.bits.rxohlim || np->repflow)) {
706 atomic_inc(&skb->users);
711 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
713 const struct request_sock *req,
718 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
721 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
723 .obj_size = sizeof(struct tcp6_request_sock),
724 .rtx_syn_ack = tcp_rtx_synack,
725 .send_ack = tcp_v6_reqsk_send_ack,
726 .destructor = tcp_v6_reqsk_destructor,
727 .send_reset = tcp_v6_send_reset,
728 .syn_ack_timeout = tcp_syn_ack_timeout,
731 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
732 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
733 sizeof(struct ipv6hdr),
734 #ifdef CONFIG_TCP_MD5SIG
735 .req_md5_lookup = tcp_v6_md5_lookup,
736 .calc_md5_hash = tcp_v6_md5_hash_skb,
738 .init_req = tcp_v6_init_req,
739 #ifdef CONFIG_SYN_COOKIES
740 .cookie_init_seq = cookie_v6_init_sequence,
742 .route_req = tcp_v6_route_req,
743 .init_seq = tcp_v6_init_sequence,
744 .send_synack = tcp_v6_send_synack,
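
/* Build and send a bare ACK or RST (not attached to any full socket) in
 * reply to an incoming segment, transmitted via the per-netns TCP control
 * socket.
 */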
747 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
748 u32 ack, u32 win, u32 tsval, u32 tsecr,
749 int oif, struct tcp_md5sig_key *key, int rst,
750 u8 tclass, __be32 label)
752 const struct tcphdr *th = tcp_hdr(skb);
754 struct sk_buff *buff;
756 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
757 struct sock *ctl_sk = net->ipv6.tcp_sk;
758 unsigned int tot_len = sizeof(struct tcphdr);
759 struct dst_entry *dst;
763 tot_len += TCPOLEN_TSTAMP_ALIGNED;
764 #ifdef CONFIG_TCP_MD5SIG
766 tot_len += TCPOLEN_MD5SIG_ALIGNED;
769 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
774 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
776 t1 = (struct tcphdr *) skb_push(buff, tot_len);
777 skb_reset_transport_header(buff);
779 /* Swap the send and the receive. */
780 memset(t1, 0, sizeof(*t1));
781 t1->dest = th->source;
782 t1->source = th->dest;
783 t1->doff = tot_len / 4;
784 t1->seq = htonl(seq);
785 t1->ack_seq = htonl(ack);
786 t1->ack = !rst || !th->ack;
788 t1->window = htons(win);
790 topt = (__be32 *)(t1 + 1);
793 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
794 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
795 *topt++ = htonl(tsval);
796 *topt++ = htonl(tsecr);
799 #ifdef CONFIG_TCP_MD5SIG
801 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
802 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
803 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
804 &ipv6_hdr(skb)->saddr,
805 &ipv6_hdr(skb)->daddr, t1);
809 memset(&fl6, 0, sizeof(fl6));
810 fl6.daddr = ipv6_hdr(skb)->saddr;
811 fl6.saddr = ipv6_hdr(skb)->daddr;
812 fl6.flowlabel = label;
814 buff->ip_summed = CHECKSUM_PARTIAL;
817 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
819 fl6.flowi6_proto = IPPROTO_TCP;
820 if (rt6_need_strict(&fl6.daddr) && !oif)
821 fl6.flowi6_oif = tcp_v6_iif(skb);
823 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
826 fl6.flowi6_oif = oif;
829 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
830 fl6.fl6_dport = t1->dest;
831 fl6.fl6_sport = t1->source;
832 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
833 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
	/* Pass a socket to ip6_dst_lookup even if it is for a RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
839 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
841 skb_dst_set(buff, dst);
842 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
843 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
845 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
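
/* Send a RST in reply to a segment we cannot associate with a valid
 * connection. If the offending segment carries an MD5 signature, the RST
 * is only sent when that signature verifies against a key we can find.
 */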
852 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
854 const struct tcphdr *th = tcp_hdr(skb);
855 u32 seq = 0, ack_seq = 0;
856 struct tcp_md5sig_key *key = NULL;
857 #ifdef CONFIG_TCP_MD5SIG
858 const __u8 *hash_location = NULL;
859 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
860 unsigned char newhash[16];
862 struct sock *sk1 = NULL;
	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped our dst.
	 */
872 if (!sk && !ipv6_unicast_destination(skb))
875 #ifdef CONFIG_TCP_MD5SIG
877 hash_location = tcp_parse_md5sig_option(th);
878 if (sk && sk_fullsock(sk)) {
879 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
880 } else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * via the source port, and then find the md5 key through the
		 * listening socket. We are not losing any security here:
		 * the incoming packet is checked against the md5 hash of the
		 * key we find, and no RST is generated if the hash does not match.
		 */
888 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
889 &tcp_hashinfo, NULL, 0,
891 th->source, &ipv6h->daddr,
892 ntohs(th->source), tcp_v6_iif(skb));
896 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
900 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
901 if (genhash || memcmp(hash_location, newhash, 16) != 0)
907 seq = ntohl(th->ack_seq);
909 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
912 oif = sk ? sk->sk_bound_dev_if : 0;
913 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
915 #ifdef CONFIG_TCP_MD5SIG
921 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
922 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
923 struct tcp_md5sig_key *key, u8 tclass,
926 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
930 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
932 struct inet_timewait_sock *tw = inet_twsk(sk);
933 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
935 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
936 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
937 tcp_time_stamp + tcptw->tw_ts_offset,
938 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
939 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
944 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
945 struct request_sock *req)
947 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
948 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
951 * The window field (SEG.WND) of every outgoing segment, with the
952 * exception of <SYN> segments, MUST be right-shifted by
953 * Rcv.Wind.Shift bits:
955 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
956 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
957 tcp_rsk(req)->rcv_nxt,
958 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
959 tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
960 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
965 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
967 #ifdef CONFIG_SYN_COOKIES
968 const struct tcphdr *th = tcp_hdr(skb);
971 sk = cookie_v6_check(sk, skb);
976 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
978 if (skb->protocol == htons(ETH_P_IP))
979 return tcp_v4_conn_request(sk, skb);
981 if (!ipv6_unicast_destination(skb))
984 return tcp_conn_request(&tcp6_request_sock_ops,
985 &tcp_request_sock_ipv6_ops, sk, skb);
989 return 0; /* don't send reset */
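
/* Create the child socket once the handshake completes: hand v4-mapped
 * peers to the IPv4 code, otherwise build an IPv6 child from the request
 * sock, cloning routing state, IPv6 options and (if present) the MD5 key.
 */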
992 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
993 struct request_sock *req,
994 struct dst_entry *dst,
995 struct request_sock *req_unhash,
998 struct inet_request_sock *ireq;
999 struct ipv6_pinfo *newnp;
1000 const struct ipv6_pinfo *np = inet6_sk(sk);
1001 struct ipv6_txoptions *opt;
1002 struct tcp6_sock *newtcp6sk;
1003 struct inet_sock *newinet;
1004 struct tcp_sock *newtp;
1006 #ifdef CONFIG_TCP_MD5SIG
1007 struct tcp_md5sig_key *key;
1011 if (skb->protocol == htons(ETH_P_IP)) {
1016 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1017 req_unhash, own_req);
1022 newtcp6sk = (struct tcp6_sock *)newsk;
1023 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1025 newinet = inet_sk(newsk);
1026 newnp = inet6_sk(newsk);
1027 newtp = tcp_sk(newsk);
1029 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1031 newnp->saddr = newsk->sk_v6_rcv_saddr;
1033 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1034 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1035 #ifdef CONFIG_TCP_MD5SIG
1036 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1039 newnp->ipv6_ac_list = NULL;
1040 newnp->ipv6_fl_list = NULL;
1041 newnp->pktoptions = NULL;
1043 newnp->mcast_oif = tcp_v6_iif(skb);
1044 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1045 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1047 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1050 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1051 * here, tcp_create_openreq_child now does this for us, see the comment in
1052 * that function for the gory details. -acme
		/* This is a tricky place. Until this moment the IPv4 tcp code
		   worked with the IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
1059 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1064 ireq = inet_rsk(req);
1066 if (sk_acceptq_is_full(sk))
1070 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
1075 newsk = tcp_create_openreq_child(sk, req, skb);
1080 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1081 * count here, tcp_create_openreq_child now does this for us, see the
1082 * comment in that function for the gory details. -acme
1085 newsk->sk_gso_type = SKB_GSO_TCPV6;
1086 ip6_dst_store(newsk, dst, NULL, NULL);
1087 inet6_sk_rx_dst_set(newsk, skb);
1089 newtcp6sk = (struct tcp6_sock *)newsk;
1090 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1092 newtp = tcp_sk(newsk);
1093 newinet = inet_sk(newsk);
1094 newnp = inet6_sk(newsk);
1096 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1098 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1099 newnp->saddr = ireq->ir_v6_loc_addr;
1100 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1101 newsk->sk_bound_dev_if = ireq->ir_iif;
1103 /* Now IPv6 options...
1105 First: no IPv4 options.
1107 newinet->inet_opt = NULL;
1108 newnp->ipv6_ac_list = NULL;
1109 newnp->ipv6_fl_list = NULL;
1112 newnp->rxopt.all = np->rxopt.all;
1114 newnp->pktoptions = NULL;
1116 newnp->mcast_oif = tcp_v6_iif(skb);
1117 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1118 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1120 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever, but we do
	   one more thing here: reattach the optmem to the new socket.
	 */
1128 opt = ireq->ipv6_opt;
1130 opt = rcu_dereference(np->opt);
1132 opt = ipv6_dup_options(newsk, opt);
1133 RCU_INIT_POINTER(newnp->opt, opt);
1135 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1137 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1140 tcp_ca_openreq_child(newsk, dst);
1142 tcp_sync_mss(newsk, dst_mtu(dst));
1143 newtp->advmss = dst_metric_advmss(dst);
1144 if (tcp_sk(sk)->rx_opt.user_mss &&
1145 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1146 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1148 tcp_initialize_rcv_mss(newsk);
1150 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1151 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1153 #ifdef CONFIG_TCP_MD5SIG
1154 /* Copy over the MD5 key from the original socket */
1155 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across.
		 */
1162 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1163 AF_INET6, key->key, key->keylen,
1164 sk_gfp_mask(sk, GFP_ATOMIC));
1168 if (__inet_inherit_port(sk, newsk) < 0) {
1169 inet_csk_prepare_forced_close(newsk);
1173 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1175 tcp_move_syn(newtp, req);
1177 /* Clone pktoptions received with SYN, if we own the req */
1178 if (ireq->pktopts) {
1179 newnp->pktoptions = skb_clone(ireq->pktopts,
1180 sk_gfp_mask(sk, GFP_ATOMIC));
1181 consume_skb(ireq->pktopts);
1182 ireq->pktopts = NULL;
1183 if (newnp->pktoptions)
1184 skb_set_owner_r(newnp->pktoptions, newsk);
1191 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1199 static void tcp_v6_restore_cb(struct sk_buff *skb)
1201 /* We need to move header back to the beginning if xfrm6_policy_check()
1202 * and tcp_v6_fill_cb() are going to be called again.
1203 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1205 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1206 sizeof(struct inet6_skb_parm));
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
1217 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1219 struct ipv6_pinfo *np = inet6_sk(sk);
1220 struct tcp_sock *tp;
1221 struct sk_buff *opt_skb = NULL;
	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
	   goes to the IPv4 receive handler and gets backlogged.
	   From the backlog it always ends up here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */
1231 if (skb->protocol == htons(ETH_P_IP))
1232 return tcp_v4_do_rcv(sk, skb);
1234 if (tcp_filter(sk, skb))
1238 * socket locking is here for SMP purposes as backlog rcv
1239 * is currently called with bh processing disabled.
	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, this is the only place in our code where we
	   can do this without affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch the
	   options received in the last packet enqueued
	   by tcp. Feel free to propose a better solution.
	 */
1256 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
1258 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1259 struct dst_entry *dst = sk->sk_rx_dst;
1261 sock_rps_save_rxhash(sk, skb);
1262 sk_mark_napi_id(sk, skb);
1264 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1265 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1267 sk->sk_rx_dst = NULL;
1271 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1273 goto ipv6_pktoptions;
1277 if (tcp_checksum_complete(skb))
1280 if (sk->sk_state == TCP_LISTEN) {
1281 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1287 sock_rps_save_rxhash(nsk, skb);
1288 sk_mark_napi_id(nsk, skb);
1289 if (tcp_child_process(sk, nsk, skb))
1292 __kfree_skb(opt_skb);
1296 sock_rps_save_rxhash(sk, skb);
1298 if (tcp_rcv_state_process(sk, skb))
1301 goto ipv6_pktoptions;
1305 tcp_v6_send_reset(sk, skb);
1308 __kfree_skb(opt_skb);
1312 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1313 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	/* You ask, what is this?

	   1. skb was enqueued by tcp.
	   2. skb is added to the tail of the read queue, rather than out of order.
	   3. The socket is not in a passive state.
	   4. Finally, it really contains options, which the user wants to receive.
	 */
1326 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1327 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1328 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1329 np->mcast_oif = tcp_v6_iif(opt_skb);
1330 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1331 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1332 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1333 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1335 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1336 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1337 skb_set_owner_r(opt_skb, sk);
1338 tcp_v6_restore_cb(opt_skb);
1339 opt_skb = xchg(&np->pktoptions, opt_skb);
1341 __kfree_skb(opt_skb);
1342 opt_skb = xchg(&np->pktoptions, NULL);
1350 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1351 const struct tcphdr *th)
	/* This is tricky: we move IP6CB to its correct location inside
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure the compiler won't play aliasing games.
	 */
1358 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1359 sizeof(struct inet6_skb_parm));
1362 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1363 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1364 skb->len - th->doff*4);
1365 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1366 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1367 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1368 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1369 TCP_SKB_CB(skb)->sacked = 0;
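
/* Main IPv6 TCP receive handler: validate the header and checksum, look up
 * the owning socket (established, request or time-wait) and feed the segment
 * to the state machine, deferring to the backlog when the socket is owned
 * by user context.
 */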
1372 static int tcp_v6_rcv(struct sk_buff *skb)
1374 const struct tcphdr *th;
1375 const struct ipv6hdr *hdr;
1379 struct net *net = dev_net(skb->dev);
1381 if (skb->pkt_type != PACKET_HOST)
1385 * Count it even if it's bad.
1387 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1389 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1392 th = (const struct tcphdr *)skb->data;
1394 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1396 if (!pskb_may_pull(skb, th->doff*4))
1399 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1402 th = (const struct tcphdr *)skb->data;
1403 hdr = ipv6_hdr(skb);
1406 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1407 th->source, th->dest, inet6_iif(skb),
1413 if (sk->sk_state == TCP_TIME_WAIT)
1416 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1417 struct request_sock *req = inet_reqsk(sk);
1420 sk = req->rsk_listener;
1421 tcp_v6_fill_cb(skb, hdr, th);
1422 if (tcp_v6_inbound_md5_hash(sk, skb)) {
1423 sk_drops_add(sk, skb);
1427 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1428 inet_csk_reqsk_queue_drop_and_put(sk, req);
1433 nsk = tcp_check_req(sk, skb, req, false);
1436 goto discard_and_relse;
1440 tcp_v6_restore_cb(skb);
1441 } else if (tcp_child_process(sk, nsk, skb)) {
1442 tcp_v6_send_reset(nsk, skb);
1443 goto discard_and_relse;
1449 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1450 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1451 goto discard_and_relse;
1454 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1455 goto discard_and_relse;
1457 tcp_v6_fill_cb(skb, hdr, th);
1459 if (tcp_v6_inbound_md5_hash(sk, skb))
1460 goto discard_and_relse;
1462 if (tcp_filter(sk, skb))
1463 goto discard_and_relse;
1464 th = (const struct tcphdr *)skb->data;
1465 hdr = ipv6_hdr(skb);
1469 if (sk->sk_state == TCP_LISTEN) {
1470 ret = tcp_v6_do_rcv(sk, skb);
1471 goto put_and_return;
1474 sk_incoming_cpu_update(sk);
1476 bh_lock_sock_nested(sk);
1477 tcp_segs_in(tcp_sk(sk), skb);
1479 if (!sock_owned_by_user(sk)) {
1480 if (!tcp_prequeue(sk, skb))
1481 ret = tcp_v6_do_rcv(sk, skb);
1482 } else if (tcp_add_backlog(sk, skb)) {
1483 goto discard_and_relse;
1490 return ret ? -1 : 0;
1493 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1496 tcp_v6_fill_cb(skb, hdr, th);
1498 if (tcp_checksum_complete(skb)) {
1500 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1502 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1504 tcp_v6_send_reset(NULL, skb);
1512 sk_drops_add(sk, skb);
1518 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1519 inet_twsk_put(inet_twsk(sk));
1523 tcp_v6_fill_cb(skb, hdr, th);
1525 if (tcp_checksum_complete(skb)) {
1526 inet_twsk_put(inet_twsk(sk));
1530 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1535 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1536 skb, __tcp_hdrlen(th),
1537 &ipv6_hdr(skb)->saddr, th->source,
1538 &ipv6_hdr(skb)->daddr,
1539 ntohs(th->dest), tcp_v6_iif(skb));
1541 struct inet_timewait_sock *tw = inet_twsk(sk);
1542 inet_twsk_deschedule_put(tw);
1544 tcp_v6_restore_cb(skb);
1548 /* Fall through to ACK */
1551 tcp_v6_timewait_ack(sk, skb);
1554 tcp_v6_restore_cb(skb);
1555 tcp_v6_send_reset(sk, skb);
1556 inet_twsk_deschedule_put(inet_twsk(sk));
1558 case TCP_TW_SUCCESS:
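
/* Early demux: try to find the established socket for an incoming segment
 * before the full input path runs, so its cached dst can be attached to
 * the skb.
 */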
1564 static void tcp_v6_early_demux(struct sk_buff *skb)
1566 const struct ipv6hdr *hdr;
1567 const struct tcphdr *th;
1570 if (skb->pkt_type != PACKET_HOST)
1573 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1576 hdr = ipv6_hdr(skb);
1579 if (th->doff < sizeof(struct tcphdr) / 4)
1582 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
1583 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1584 &hdr->saddr, th->source,
1585 &hdr->daddr, ntohs(th->dest),
1589 skb->destructor = sock_edemux;
1590 if (sk_fullsock(sk)) {
1591 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1594 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1596 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1597 skb_dst_set_noref(skb, dst);
1602 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1603 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1604 .twsk_unique = tcp_twsk_unique,
1605 .twsk_destructor = tcp_twsk_destructor,
1608 static const struct inet_connection_sock_af_ops ipv6_specific = {
1609 .queue_xmit = inet6_csk_xmit,
1610 .send_check = tcp_v6_send_check,
1611 .rebuild_header = inet6_sk_rebuild_header,
1612 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1613 .conn_request = tcp_v6_conn_request,
1614 .syn_recv_sock = tcp_v6_syn_recv_sock,
1615 .net_header_len = sizeof(struct ipv6hdr),
1616 .net_frag_header_len = sizeof(struct frag_hdr),
1617 .setsockopt = ipv6_setsockopt,
1618 .getsockopt = ipv6_getsockopt,
1619 .addr2sockaddr = inet6_csk_addr2sockaddr,
1620 .sockaddr_len = sizeof(struct sockaddr_in6),
1621 .bind_conflict = inet6_csk_bind_conflict,
1622 #ifdef CONFIG_COMPAT
1623 .compat_setsockopt = compat_ipv6_setsockopt,
1624 .compat_getsockopt = compat_ipv6_getsockopt,
1626 .mtu_reduced = tcp_v6_mtu_reduced,
1629 #ifdef CONFIG_TCP_MD5SIG
1630 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1631 .md5_lookup = tcp_v6_md5_lookup,
1632 .calc_md5_hash = tcp_v6_md5_hash_skb,
1633 .md5_parse = tcp_v6_parse_md5_keys,
1638 * TCP over IPv4 via INET6 API
1640 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1641 .queue_xmit = ip_queue_xmit,
1642 .send_check = tcp_v4_send_check,
1643 .rebuild_header = inet_sk_rebuild_header,
1644 .sk_rx_dst_set = inet_sk_rx_dst_set,
1645 .conn_request = tcp_v6_conn_request,
1646 .syn_recv_sock = tcp_v6_syn_recv_sock,
1647 .net_header_len = sizeof(struct iphdr),
1648 .setsockopt = ipv6_setsockopt,
1649 .getsockopt = ipv6_getsockopt,
1650 .addr2sockaddr = inet6_csk_addr2sockaddr,
1651 .sockaddr_len = sizeof(struct sockaddr_in6),
1652 .bind_conflict = inet6_csk_bind_conflict,
1653 #ifdef CONFIG_COMPAT
1654 .compat_setsockopt = compat_ipv6_setsockopt,
1655 .compat_getsockopt = compat_ipv6_getsockopt,
1657 .mtu_reduced = tcp_v4_mtu_reduced,
1660 #ifdef CONFIG_TCP_MD5SIG
1661 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1662 .md5_lookup = tcp_v4_md5_lookup,
1663 .calc_md5_hash = tcp_v4_md5_hash_skb,
1664 .md5_parse = tcp_v6_parse_md5_keys,
/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
1671 static int tcp_v6_init_sock(struct sock *sk)
1673 struct inet_connection_sock *icsk = inet_csk(sk);
1677 icsk->icsk_af_ops = &ipv6_specific;
1679 #ifdef CONFIG_TCP_MD5SIG
1680 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1686 static void tcp_v6_destroy_sock(struct sock *sk)
1688 tcp_v4_destroy_sock(sk);
1689 inet6_destroy_sock(sk);
1692 #ifdef CONFIG_PROC_FS
1693 /* Proc filesystem TCPv6 sock list dumping. */
1694 static void get_openreq6(struct seq_file *seq,
1695 const struct request_sock *req, int i)
1697 long ttd = req->rsk_timer.expires - jiffies;
1698 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1699 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1705 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1706 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1708 src->s6_addr32[0], src->s6_addr32[1],
1709 src->s6_addr32[2], src->s6_addr32[3],
1710 inet_rsk(req)->ir_num,
1711 dest->s6_addr32[0], dest->s6_addr32[1],
1712 dest->s6_addr32[2], dest->s6_addr32[3],
1713 ntohs(inet_rsk(req)->ir_rmt_port),
1715 0, 0, /* could print option size, but that is af dependent. */
1716 1, /* timers active (only the expire timer) */
1717 jiffies_to_clock_t(ttd),
1719 from_kuid_munged(seq_user_ns(seq),
1720 sock_i_uid(req->rsk_listener)),
1721 0, /* non standard timer */
1722 0, /* open_requests have no inode */
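
/* Format the state of one TCP socket into a /proc/net/tcp6 line. */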
1726 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1728 const struct in6_addr *dest, *src;
1731 unsigned long timer_expires;
1732 const struct inet_sock *inet = inet_sk(sp);
1733 const struct tcp_sock *tp = tcp_sk(sp);
1734 const struct inet_connection_sock *icsk = inet_csk(sp);
1735 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1739 dest = &sp->sk_v6_daddr;
1740 src = &sp->sk_v6_rcv_saddr;
1741 destp = ntohs(inet->inet_dport);
1742 srcp = ntohs(inet->inet_sport);
1744 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1745 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
1746 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1748 timer_expires = icsk->icsk_timeout;
1749 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1751 timer_expires = icsk->icsk_timeout;
1752 } else if (timer_pending(&sp->sk_timer)) {
1754 timer_expires = sp->sk_timer.expires;
1757 timer_expires = jiffies;
1760 state = sk_state_load(sp);
1761 if (state == TCP_LISTEN)
1762 rx_queue = sp->sk_ack_backlog;
1764 /* Because we don't lock the socket,
1765 * we might find a transient negative value.
1767 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1770 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1771 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1773 src->s6_addr32[0], src->s6_addr32[1],
1774 src->s6_addr32[2], src->s6_addr32[3], srcp,
1775 dest->s6_addr32[0], dest->s6_addr32[1],
1776 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1778 tp->write_seq - tp->snd_una,
1781 jiffies_delta_to_clock_t(timer_expires - jiffies),
1782 icsk->icsk_retransmits,
1783 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1784 icsk->icsk_probes_out,
1786 atomic_read(&sp->sk_refcnt), sp,
1787 jiffies_to_clock_t(icsk->icsk_rto),
1788 jiffies_to_clock_t(icsk->icsk_ack.ato),
1789 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1791 state == TCP_LISTEN ?
1792 fastopenq->max_qlen :
1793 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1797 static void get_timewait6_sock(struct seq_file *seq,
1798 struct inet_timewait_sock *tw, int i)
1800 long delta = tw->tw_timer.expires - jiffies;
1801 const struct in6_addr *dest, *src;
1804 dest = &tw->tw_v6_daddr;
1805 src = &tw->tw_v6_rcv_saddr;
1806 destp = ntohs(tw->tw_dport);
1807 srcp = ntohs(tw->tw_sport);
1810 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1811 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1813 src->s6_addr32[0], src->s6_addr32[1],
1814 src->s6_addr32[2], src->s6_addr32[3], srcp,
1815 dest->s6_addr32[0], dest->s6_addr32[1],
1816 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1817 tw->tw_substate, 0, 0,
1818 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1819 atomic_read(&tw->tw_refcnt), tw);
1822 static int tcp6_seq_show(struct seq_file *seq, void *v)
1824 struct tcp_iter_state *st;
1825 struct sock *sk = v;
1827 if (v == SEQ_START_TOKEN) {
1832 "st tx_queue rx_queue tr tm->when retrnsmt"
1833 " uid timeout inode\n");
1838 if (sk->sk_state == TCP_TIME_WAIT)
1839 get_timewait6_sock(seq, v, st->num);
1840 else if (sk->sk_state == TCP_NEW_SYN_RECV)
1841 get_openreq6(seq, v, st->num);
1843 get_tcp6_sock(seq, v, st->num);
1848 static const struct file_operations tcp6_afinfo_seq_fops = {
1849 .owner = THIS_MODULE,
1850 .open = tcp_seq_open,
1852 .llseek = seq_lseek,
1853 .release = seq_release_net
1856 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1859 .seq_fops = &tcp6_afinfo_seq_fops,
1861 .show = tcp6_seq_show,
1865 int __net_init tcp6_proc_init(struct net *net)
1867 return tcp_proc_register(net, &tcp6_seq_afinfo);
1870 void tcp6_proc_exit(struct net *net)
1872 tcp_proc_unregister(net, &tcp6_seq_afinfo);
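
/* The TCP-over-IPv6 protocol definition hooked into the socket layer. */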
1876 struct proto tcpv6_prot = {
1878 .owner = THIS_MODULE,
1880 .connect = tcp_v6_connect,
1881 .disconnect = tcp_disconnect,
1882 .accept = inet_csk_accept,
1884 .init = tcp_v6_init_sock,
1885 .destroy = tcp_v6_destroy_sock,
1886 .shutdown = tcp_shutdown,
1887 .setsockopt = tcp_setsockopt,
1888 .getsockopt = tcp_getsockopt,
1889 .recvmsg = tcp_recvmsg,
1890 .sendmsg = tcp_sendmsg,
1891 .sendpage = tcp_sendpage,
1892 .backlog_rcv = tcp_v6_do_rcv,
1893 .release_cb = tcp_release_cb,
1895 .unhash = inet_unhash,
1896 .get_port = inet_csk_get_port,
1897 .enter_memory_pressure = tcp_enter_memory_pressure,
1898 .stream_memory_free = tcp_stream_memory_free,
1899 .sockets_allocated = &tcp_sockets_allocated,
1900 .memory_allocated = &tcp_memory_allocated,
1901 .memory_pressure = &tcp_memory_pressure,
1902 .orphan_count = &tcp_orphan_count,
1903 .sysctl_mem = sysctl_tcp_mem,
1904 .sysctl_wmem = sysctl_tcp_wmem,
1905 .sysctl_rmem = sysctl_tcp_rmem,
1906 .max_header = MAX_TCP_HEADER,
1907 .obj_size = sizeof(struct tcp6_sock),
1908 .slab_flags = SLAB_DESTROY_BY_RCU,
1909 .twsk_prot = &tcp6_timewait_sock_ops,
1910 .rsk_prot = &tcp6_request_sock_ops,
1911 .h.hashinfo = &tcp_hashinfo,
1912 .no_autobind = true,
1913 #ifdef CONFIG_COMPAT
1914 .compat_setsockopt = compat_tcp_setsockopt,
1915 .compat_getsockopt = compat_tcp_getsockopt,
1917 .diag_destroy = tcp_abort,
1920 static const struct inet6_protocol tcpv6_protocol = {
1921 .early_demux = tcp_v6_early_demux,
1922 .handler = tcp_v6_rcv,
1923 .err_handler = tcp_v6_err,
1924 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1927 static struct inet_protosw tcpv6_protosw = {
1928 .type = SOCK_STREAM,
1929 .protocol = IPPROTO_TCP,
1930 .prot = &tcpv6_prot,
1931 .ops = &inet6_stream_ops,
1932 .flags = INET_PROTOSW_PERMANENT |
1936 static int __net_init tcpv6_net_init(struct net *net)
1938 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1939 SOCK_RAW, IPPROTO_TCP, net);
1942 static void __net_exit tcpv6_net_exit(struct net *net)
1944 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1947 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1949 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1952 static struct pernet_operations tcpv6_net_ops = {
1953 .init = tcpv6_net_init,
1954 .exit = tcpv6_net_exit,
1955 .exit_batch = tcpv6_net_exit_batch,
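
/* Register the protocol handler, socket interface and per-netns state at
 * module init / boot.
 */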
1958 int __init tcpv6_init(void)
1962 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1966 /* register inet6 protocol */
1967 ret = inet6_register_protosw(&tcpv6_protosw);
1969 goto out_tcpv6_protocol;
1971 ret = register_pernet_subsys(&tcpv6_net_ops);
1973 goto out_tcpv6_protosw;
1978 inet6_unregister_protosw(&tcpv6_protosw);
1980 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1984 void tcpv6_exit(void)
1986 unregister_pernet_subsys(&tcpv6_net_ops);
1987 inet6_unregister_protosw(&tcpv6_protosw);
1988 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);