/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

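/*
 * With CONFIG_TCP_MD5SIG disabled, tcp_v6_md5_do_lookup() above degenerates
 * to a constant NULL, so callers such as tcp_v6_reqsk_send_ack() can invoke
 * it unconditionally instead of wrapping every call site in #ifdefs.
 */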
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		dst_hold(dst);
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		if (rt->rt6i_node)
			inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
	}
}

static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

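/*
 * tcp_v6_connect() backs connect(2) on an AF_INET6 TCP socket: it validates
 * the destination, redirects v4-mapped destinations to tcp_v4_connect(),
 * routes the flow to pick a source address, moves the socket to
 * TCP_SYN_SENT, chooses an initial sequence number, and sends the SYN via
 * tcp_connect().
 */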
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	ip6_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

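/*
 * ICMPv6 error handler: look up the socket the offending segment belonged
 * to and react to the error type - redirects update the cached route,
 * "packet too big" feeds PMTU discovery, and other errors are converted
 * with icmpv6_err_convert() and reported to the socket.
 */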
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always <576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}

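/*
 * TCP MD5 signature support (RFC 2385): every segment carries an MD5
 * digest of the pseudo-header, TCP header and payload, keyed per peer
 * address. The helpers below look up, configure, and verify those keys
 * for IPv6 (and v4-mapped) peers.
 */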
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

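/*
 * Keys are installed from user space through setsockopt(TCP_MD5SIG).
 * A minimal sketch of a caller (user space, not part of this file;
 * assumes <netinet/tcp.h> exposes struct tcp_md5sig and that peer_sin6
 * holds the peer's sockaddr_in6):
 *
 *	struct tcp_md5sig md5;
 *
 *	memset(&md5, 0, sizeof(md5));
 *	memcpy(&md5.tcpm_addr, &peer_sin6, sizeof(peer_sin6));
 *	md5.tcpm_keylen = strlen(secret);
 *	memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key for that peer, matching the
 * !cmd.tcpm_keylen branch above.
 */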
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

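/*
 * tcp_v6_md5_hash_hdr() computes the digest for locally generated,
 * header-only segments (the RST/ACK replies built in
 * tcp_v6_send_response()): pseudo-header, then the TCP header, then the
 * key, in the order RFC 2385 prescribes.
 */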
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
	return false;
}
#endif

static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
};

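/*
 * tcp_v6_send_response() builds and transmits a bare (payload-free)
 * segment - either a RST or an ACK, depending on @rst - in reply to a
 * received skb, using the per-netns control socket rather than the
 * (possibly absent) receiving socket. Both tcp_v6_send_reset() and
 * tcp_v6_send_ack() are thin wrappers around it.
 */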
static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for a RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We are not losing security here:
		 * the incoming packet is checked against the md5 hash of the
		 * found key, and no RST is generated if the hash doesn't
		 * match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}

static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}

static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct request_sock *req;
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
	if (req) {
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk || nsk == sk)
			reqsk_put(req);
		return nsk;
	}
	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
					 tcp_v6_iif(skb));
	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}

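/*
 * tcp_v6_syn_recv_sock() creates the child socket once the 3-way handshake
 * completes. Two paths: a v4-mapped child is mostly built by
 * tcp_v4_syn_recv_sock() and then patched to the mapped ops, while a native
 * IPv6 child is created via tcp_create_openreq_child() and has its
 * addresses, options, and (optionally) MD5 key copied over.
 */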
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt	  = NULL;
		newnp->mcast_oif  = tcp_v6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment the IPv4 code
		 * worked with the IPv6 icsk.icsk_af_ops.
		 * Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	ip6_set_txhash(newsk);

	/* Now IPv6 options...
	 *
	 * First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).
	 *
	 * Yes, keeping a reference count would be much more clever, but we
	 * do one more thing here: reattach optmem to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: the socket is IPv6 and an IPv4 packet arrives,
	 * goes to the IPv4 receive handler and is backlogged.
	 * From the backlog it always comes here. Kerboom...
	 * Fortunately, tcp_rcv_established and rcv_established
	 * handle them correctly, but it is not the case with
	 * tcp_v6_hnd_req and tcp_v6_send_reset().  --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.
	 *
	 * Yes, guys, it is the only place in our code where we
	 * may make it not affect IPv4.
	 * The rest of the code is protocol independent,
	 * and I do not like the idea of uglifying IPv4.
	 *
	 * Actually, the whole idea behind IPV6_PKTOPTIONS
	 * does not look very well thought out. For now we latch
	 * the options received in the last packet enqueued
	 * by tcp. Feel free to propose a better solution.
	 *					--ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(sk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* What is this about? We latch the options only if all of:
	 *
	 * 1. skb was enqueued by tcp.
	 * 2. skb is added to the tail of the read queue, not out of order.
	 * 3. socket is not in passive state.
	 * 4. Finally, it really contains options, which the user wants to
	 *    receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB to its correct location in
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure the compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move the header back to the beginning if
	 * xfrm6_policy_check() and tcp_v6_fill_cb() are going to be
	 * called again.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}

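/*
 * tcp_v6_rcv() is the protocol entry point for every inbound TCP/IPv6
 * segment: validate the header and checksum, look up the owning socket,
 * run XFRM policy and (optionally) MD5 checks, then either process the
 * segment directly, prequeue it, or push it onto the socket backlog when
 * the owner currently holds the socket lock.
 */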
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
				inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_incoming_cpu_update(sk);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (skb->len < (th->doff<<2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw);
			inet_twsk_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}

static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

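/*
 * Early demux runs before routing and deliberately consults only the
 * established hash: the point is to attach the owning socket and its
 * cached rx dst to the skb as early as possible, and listening sockets
 * have no cached route worth attaching.
 */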
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct request_sock *req, int i, kuid_t uid)
{
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   sp->sk_state == TCP_LISTEN ?
			(fastopenq ? fastopenq->max_qlen : 0) :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait6_sock(seq, v, st->num);
		else
			get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, v, st->num, st->uid);
		break;
	}
out:
	return 0;
}

static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif

static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};

static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};

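/*
 * Registration order matters on init: the IPPROTO_TCP handler first, then
 * the SOCK_STREAM protosw so sockets can be created, then the per-netns
 * control sockets. The error paths in tcpv6_init() unwind in exactly the
 * reverse order.
 */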
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}