3 * Linux INET6 implementation
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
17 * to a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
42 #include <linux/uaccess.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/inet_common.h>
63 #include <net/secure_seq.h>
64 #include <net/tcp_memcontrol.h>
65 #include <net/busy_poll.h>
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
70 #include <linux/crypto.h>
71 #include <linux/scatterlist.h>
73 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
74 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
75 struct request_sock *req);
77 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
79 static const struct inet_connection_sock_af_ops ipv6_mapped;
80 static const struct inet_connection_sock_af_ops ipv6_specific;
81 #ifdef CONFIG_TCP_MD5SIG
82 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
83 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
85 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
86 const struct in6_addr *addr)
92 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
94 struct dst_entry *dst = skb_dst(skb);
97 const struct rt6_info *rt = (const struct rt6_info *)dst;
101 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
103 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
107 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
109 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
110 ipv6_hdr(skb)->saddr.s6_addr32,
111 tcp_hdr(skb)->dest,
112 tcp_hdr(skb)->source);
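/* Editor's note (sketch, assumptions flagged): secure_tcpv6_sequence_number()
 * implements an RFC 6528-style construction. "keyed_hash" and "net_secret"
 * below are illustrative stand-ins for the kernel's internal hash and
 * boot-time secret, not real symbols usable from this file.
 */
#if 0
static u32 isn_sketch(const __be32 *saddr, const __be32 *daddr,
		      __be16 sport, __be16 dport)
{
	/* hard to guess off-path, yet increasing over time per 4-tuple */
	return keyed_hash(saddr, daddr, sport, dport, net_secret) +
	       (u32)(ktime_get_ns() >> 6);
}
#endif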
115 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
118 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
119 struct inet_sock *inet = inet_sk(sk);
120 struct inet_connection_sock *icsk = inet_csk(sk);
121 struct ipv6_pinfo *np = inet6_sk(sk);
122 struct tcp_sock *tp = tcp_sk(sk);
123 struct in6_addr *saddr = NULL, *final_p, final;
126 struct dst_entry *dst;
130 if (addr_len < SIN6_LEN_RFC2133)
133 if (usin->sin6_family != AF_INET6)
134 return -EAFNOSUPPORT;
136 memset(&fl6, 0, sizeof(fl6));
139 fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
140 IP6_ECN_flow_init(fl6.flowlabel);
141 if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
142 struct ip6_flowlabel *flowlabel;
143 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
146 fl6_sock_release(flowlabel);
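/* For reference: IPV6_FLOWINFO_MASK (big-endian 0x0FFFFFFF) keeps the
 * traffic-class and flow-label bits of sin6_flowinfo, while
 * IPV6_FLOWLABEL_MASK (0x000FFFFF) is the 20-bit flow label proper that
 * fl6_sock_lookup() keys on.
 */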
151 * connect() to INADDR_ANY means loopback (BSD'ism).
154 if (ipv6_addr_any(&usin->sin6_addr))
155 usin->sin6_addr.s6_addr[15] = 0x1;
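/* Example: connect(fd, "::", port) on an AF_INET6 socket therefore behaves
 * like connect(fd, "::1", port), mirroring IPv4's 0.0.0.0 -> 127.0.0.1
 * convention.
 */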
157 addr_type = ipv6_addr_type(&usin->sin6_addr);
159 if (addr_type & IPV6_ADDR_MULTICAST)
162 if (addr_type & IPV6_ADDR_LINKLOCAL) {
163 if (addr_len >= sizeof(struct sockaddr_in6) &&
164 usin->sin6_scope_id) {
165 /* If interface is set while binding, indices
166 * must coincide.
167 */
168 if (sk->sk_bound_dev_if &&
169 sk->sk_bound_dev_if != usin->sin6_scope_id)
172 sk->sk_bound_dev_if = usin->sin6_scope_id;
175 /* Connect to link-local address requires an interface */
176 if (!sk->sk_bound_dev_if)
180 if (tp->rx_opt.ts_recent_stamp &&
181 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
182 tp->rx_opt.ts_recent = 0;
183 tp->rx_opt.ts_recent_stamp = 0;
187 sk->sk_v6_daddr = usin->sin6_addr;
188 np->flow_label = fl6.flowlabel;
194 if (addr_type == IPV6_ADDR_MAPPED) {
195 u32 exthdrlen = icsk->icsk_ext_hdr_len;
196 struct sockaddr_in sin;
198 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
200 if (__ipv6_only_sock(sk))
203 sin.sin_family = AF_INET;
204 sin.sin_port = usin->sin6_port;
205 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
207 icsk->icsk_af_ops = &ipv6_mapped;
208 sk->sk_backlog_rcv = tcp_v4_do_rcv;
209 #ifdef CONFIG_TCP_MD5SIG
210 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
213 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
216 icsk->icsk_ext_hdr_len = exthdrlen;
217 icsk->icsk_af_ops = &ipv6_specific;
218 sk->sk_backlog_rcv = tcp_v6_do_rcv;
219 #ifdef CONFIG_TCP_MD5SIG
220 tp->af_specific = &tcp_sock_ipv6_specific;
224 np->saddr = sk->sk_v6_rcv_saddr;
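/* Editor's sketch (userspace, not built): how an application reaches the
 * v4-mapped path above. Only the low 32 bits of the address
 * (s6_addr32[3]) carry the IPv4 destination handed to tcp_v4_connect().
 */
#if 0
int connect_v4_mapped(int fd)
{
	struct sockaddr_in6 sin6 = {
		.sin6_family = AF_INET6,
		.sin6_port = htons(80),
	};

	inet_pton(AF_INET6, "::ffff:192.0.2.1", &sin6.sin6_addr);
	return connect(fd, (struct sockaddr *)&sin6, sizeof(sin6));
}
#endif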
229 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
230 saddr = &sk->sk_v6_rcv_saddr;
232 fl6.flowi6_proto = IPPROTO_TCP;
233 fl6.daddr = sk->sk_v6_daddr;
234 fl6.saddr = saddr ? *saddr : np->saddr;
235 fl6.flowi6_oif = sk->sk_bound_dev_if;
236 fl6.flowi6_mark = sk->sk_mark;
237 fl6.fl6_dport = usin->sin6_port;
238 fl6.fl6_sport = inet->inet_sport;
240 final_p = fl6_update_dst(&fl6, np->opt, &final);
242 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
244 dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
252 sk->sk_v6_rcv_saddr = *saddr;
255 /* set the source address */
257 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
259 sk->sk_gso_type = SKB_GSO_TCPV6;
260 __ip6_dst_store(sk, dst, NULL, NULL);
262 rt = (struct rt6_info *) dst;
263 if (tcp_death_row.sysctl_tw_recycle &&
264 !tp->rx_opt.ts_recent_stamp &&
265 ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
266 tcp_fetch_timewait_stamp(sk, dst);
268 icsk->icsk_ext_hdr_len = 0;
270 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
273 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
275 inet->inet_dport = usin->sin6_port;
277 tcp_set_state(sk, TCP_SYN_SENT);
278 err = inet6_hash_connect(&tcp_death_row, sk);
284 if (!tp->write_seq && likely(!tp->repair))
285 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
286 sk->sk_v6_daddr.s6_addr32,
290 err = tcp_connect(sk);
297 tcp_set_state(sk, TCP_CLOSE);
300 inet->inet_dport = 0;
301 sk->sk_route_caps = 0;
305 static void tcp_v6_mtu_reduced(struct sock *sk)
307 struct dst_entry *dst;
309 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
312 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
316 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
317 tcp_sync_mss(sk, dst_mtu(dst));
318 tcp_simple_retransmit(sk);
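/* Editor's note: this path deliberately only shrinks the MSS. The MSS is
 * resynced and a retransmit triggered solely when the cached
 * icsk_pmtu_cookie exceeds the new path MTU; a larger advertised MTU is
 * ignored here.
 */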
322 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
323 u8 type, u8 code, int offset, __be32 info)
325 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
326 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
327 struct net *net = dev_net(skb->dev);
328 struct request_sock *fastopen;
329 struct ipv6_pinfo *np;
335 sk = __inet6_lookup_established(net, &tcp_hashinfo,
336 &hdr->daddr, th->dest,
337 &hdr->saddr, ntohs(th->source),
341 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
342 ICMP6_MIB_INERRORS);
346 if (sk->sk_state == TCP_TIME_WAIT) {
347 inet_twsk_put(inet_twsk(sk));
350 seq = ntohl(th->seq);
351 if (sk->sk_state == TCP_NEW_SYN_RECV)
352 return tcp_req_err(sk, seq);
355 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
356 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
358 if (sk->sk_state == TCP_CLOSE)
361 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
362 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
367 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
368 fastopen = tp->fastopen_rsk;
369 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
370 if (sk->sk_state != TCP_LISTEN &&
371 !between(seq, snd_una, tp->snd_nxt)) {
372 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
378 if (type == NDISC_REDIRECT) {
379 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
382 dst->ops->redirect(dst, sk, skb);
386 if (type == ICMPV6_PKT_TOOBIG) {
387 /* We are not interested in TCP_LISTEN and open_requests
388 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
389 * they should go through unfragmented).
391 if (sk->sk_state == TCP_LISTEN)
394 if (!ip6_sk_accept_pmtu(sk))
397 tp->mtu_info = ntohl(info);
398 if (!sock_owned_by_user(sk))
399 tcp_v6_mtu_reduced(sk);
400 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
406 icmpv6_err_convert(type, code, &err);
408 /* Might be for a request_sock */
409 switch (sk->sk_state) {
412 /* Only in fast or simultaneous open. If a fast open socket
413 * is already accepted it is treated as a connected one below.
415 if (fastopen && !fastopen->sk)
418 if (!sock_owned_by_user(sk)) {
420 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
424 sk->sk_err_soft = err;
428 if (!sock_owned_by_user(sk) && np->recverr) {
430 sk->sk_error_report(sk);
432 sk->sk_err_soft = err;
440 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
442 struct request_sock *req,
444 struct tcp_fastopen_cookie *foc)
446 struct inet_request_sock *ireq = inet_rsk(req);
447 struct ipv6_pinfo *np = inet6_sk(sk);
448 struct flowi6 *fl6 = &fl->u.ip6;
452 /* First, grab a route. */
453 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
456 skb = tcp_make_synack(sk, dst, req, foc);
459 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
460 &ireq->ir_v6_rmt_addr);
462 fl6->daddr = ireq->ir_v6_rmt_addr;
463 if (np->repflow && ireq->pktopts)
464 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
466 skb_set_queue_mapping(skb, queue_mapping);
467 err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
468 err = net_xmit_eval(err);
476 static void tcp_v6_reqsk_destructor(struct request_sock *req)
478 kfree_skb(inet_rsk(req)->pktopts);
481 #ifdef CONFIG_TCP_MD5SIG
482 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
483 const struct in6_addr *addr)
485 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
488 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
489 const struct sock *addr_sk)
491 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
494 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
497 struct tcp_md5sig cmd;
498 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
500 if (optlen < sizeof(cmd))
503 if (copy_from_user(&cmd, optval, sizeof(cmd)))
506 if (sin6->sin6_family != AF_INET6)
509 if (!cmd.tcpm_keylen) {
510 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
511 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
513 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
517 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
520 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
521 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
522 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
524 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
525 AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
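/* Editor's sketch (userspace, not built): installing a key that the parser
 * above accepts. struct tcp_md5sig and TCP_MD5SIG come from <linux/tcp.h>;
 * passing a v4-mapped sin6_addr stores the key under AF_INET, as coded
 * above.
 */
#if 0
int install_md5_key(int fd, const struct sockaddr_in6 *peer,
		    const void *key, unsigned int keylen)
{
	struct tcp_md5sig md5 = { .tcpm_keylen = keylen };

	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	memcpy(md5.tcpm_key, key, keylen);	/* <= TCP_MD5SIG_MAXKEYLEN */
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif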
528 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
529 const struct in6_addr *daddr,
530 const struct in6_addr *saddr, int nbytes)
532 struct tcp6_pseudohdr *bp;
533 struct scatterlist sg;
535 bp = &hp->md5_blk.ip6;
536 /* 1. TCP pseudo-header (RFC2460) */
539 bp->protocol = cpu_to_be32(IPPROTO_TCP);
540 bp->len = cpu_to_be32(nbytes);
542 sg_init_one(&sg, bp, sizeof(*bp));
543 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
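/* For reference, the block hashed above is struct tcp6_pseudohdr from
 * <net/tcp.h>, matching RFC 2460 section 8.1: saddr (16 bytes), daddr
 * (16 bytes), len (__be32 TCP length), protocol (__be32, IPPROTO_TCP in
 * the low byte).
 */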
546 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
547 const struct in6_addr *daddr, struct in6_addr *saddr,
548 const struct tcphdr *th)
550 struct tcp_md5sig_pool *hp;
551 struct hash_desc *desc;
553 hp = tcp_get_md5sig_pool();
555 goto clear_hash_noput;
556 desc = &hp->md5_desc;
558 if (crypto_hash_init(desc))
560 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
562 if (tcp_md5_hash_header(hp, th))
564 if (tcp_md5_hash_key(hp, key))
566 if (crypto_hash_final(desc, md5_hash))
569 tcp_put_md5sig_pool();
573 tcp_put_md5sig_pool();
575 memset(md5_hash, 0, 16);
579 static int tcp_v6_md5_hash_skb(char *md5_hash,
580 const struct tcp_md5sig_key *key,
581 const struct sock *sk,
582 const struct sk_buff *skb)
584 const struct in6_addr *saddr, *daddr;
585 struct tcp_md5sig_pool *hp;
586 struct hash_desc *desc;
587 const struct tcphdr *th = tcp_hdr(skb);
589 if (sk) { /* valid for establish/request sockets */
590 saddr = &sk->sk_v6_rcv_saddr;
591 daddr = &sk->sk_v6_daddr;
593 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
594 saddr = &ip6h->saddr;
595 daddr = &ip6h->daddr;
598 hp = tcp_get_md5sig_pool();
600 goto clear_hash_noput;
601 desc = &hp->md5_desc;
603 if (crypto_hash_init(desc))
606 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
608 if (tcp_md5_hash_header(hp, th))
610 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
612 if (tcp_md5_hash_key(hp, key))
614 if (crypto_hash_final(desc, md5_hash))
617 tcp_put_md5sig_pool();
621 tcp_put_md5sig_pool();
623 memset(md5_hash, 0, 16);
627 static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
629 const __u8 *hash_location = NULL;
630 struct tcp_md5sig_key *hash_expected;
631 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
632 const struct tcphdr *th = tcp_hdr(skb);
636 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
637 hash_location = tcp_parse_md5sig_option(th);
639 /* We've parsed the options - do we have a hash? */
640 if (!hash_expected && !hash_location)
643 if (hash_expected && !hash_location) {
644 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
648 if (!hash_expected && hash_location) {
649 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
653 /* check the signature */
654 genhash = tcp_v6_md5_hash_skb(newhash,
658 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
659 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
660 genhash ? "failed" : "mismatch",
661 &ip6h->saddr, ntohs(th->source),
662 &ip6h->daddr, ntohs(th->dest));
669 static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
672 struct inet_request_sock *ireq = inet_rsk(req);
673 struct ipv6_pinfo *np = inet6_sk(sk);
675 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
676 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
678 /* So that link locals have meaning */
679 if (!sk->sk_bound_dev_if &&
680 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
681 ireq->ir_iif = tcp_v6_iif(skb);
683 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
684 (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
685 np->rxopt.bits.rxinfo ||
686 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
687 np->rxopt.bits.rxohlim || np->repflow)) {
688 atomic_inc(&skb->users);
693 static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
694 const struct request_sock *req,
699 return inet6_csk_route_req(sk, &fl->u.ip6, req);
702 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
704 .obj_size = sizeof(struct tcp6_request_sock),
705 .rtx_syn_ack = tcp_rtx_synack,
706 .send_ack = tcp_v6_reqsk_send_ack,
707 .destructor = tcp_v6_reqsk_destructor,
708 .send_reset = tcp_v6_send_reset,
709 .syn_ack_timeout = tcp_syn_ack_timeout,
712 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
713 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
714 sizeof(struct ipv6hdr),
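/* i.e. 1280 (IPV6_MIN_MTU) - 20 (tcphdr) - 40 (ipv6hdr) = 1220 bytes */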
715 #ifdef CONFIG_TCP_MD5SIG
716 .req_md5_lookup = tcp_v6_md5_lookup,
717 .calc_md5_hash = tcp_v6_md5_hash_skb,
719 .init_req = tcp_v6_init_req,
720 #ifdef CONFIG_SYN_COOKIES
721 .cookie_init_seq = cookie_v6_init_sequence,
723 .route_req = tcp_v6_route_req,
724 .init_seq = tcp_v6_init_sequence,
725 .send_synack = tcp_v6_send_synack,
726 .queue_hash_add = inet6_csk_reqsk_queue_hash_add,
729 static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
730 u32 ack, u32 win, u32 tsval, u32 tsecr,
731 int oif, struct tcp_md5sig_key *key, int rst,
732 u8 tclass, u32 label)
734 const struct tcphdr *th = tcp_hdr(skb);
736 struct sk_buff *buff;
738 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
739 struct sock *ctl_sk = net->ipv6.tcp_sk;
740 unsigned int tot_len = sizeof(struct tcphdr);
741 struct dst_entry *dst;
745 tot_len += TCPOLEN_TSTAMP_ALIGNED;
746 #ifdef CONFIG_TCP_MD5SIG
748 tot_len += TCPOLEN_MD5SIG_ALIGNED;
751 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
756 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
758 t1 = (struct tcphdr *) skb_push(buff, tot_len);
759 skb_reset_transport_header(buff);
761 /* Swap the send and the receive. */
762 memset(t1, 0, sizeof(*t1));
763 t1->dest = th->source;
764 t1->source = th->dest;
765 t1->doff = tot_len / 4;
766 t1->seq = htonl(seq);
767 t1->ack_seq = htonl(ack);
768 t1->ack = !rst || !th->ack;
770 t1->window = htons(win);
772 topt = (__be32 *)(t1 + 1);
775 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
776 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
777 *topt++ = htonl(tsval);
778 *topt++ = htonl(tsecr);
781 #ifdef CONFIG_TCP_MD5SIG
783 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
784 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
785 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
786 &ipv6_hdr(skb)->saddr,
787 &ipv6_hdr(skb)->daddr, t1);
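/* Wire layout of the option words emitted above (network byte order):
 *   timestamps: 01 01 08 0a <tsval:4> <tsecr:4>  (NOP NOP kind=8 len=10)
 *   MD5:        01 01 13 12 <digest:16>          (NOP NOP kind=19 len=18)
 */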
791 memset(&fl6, 0, sizeof(fl6));
792 fl6.daddr = ipv6_hdr(skb)->saddr;
793 fl6.saddr = ipv6_hdr(skb)->daddr;
794 fl6.flowlabel = label;
796 buff->ip_summed = CHECKSUM_PARTIAL;
799 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
801 fl6.flowi6_proto = IPPROTO_TCP;
802 if (rt6_need_strict(&fl6.daddr) && !oif)
803 fl6.flowi6_oif = tcp_v6_iif(skb);
805 fl6.flowi6_oif = oif;
806 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
807 fl6.fl6_dport = t1->dest;
808 fl6.fl6_sport = t1->source;
809 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
811 /* Pass a socket to ip6_dst_lookup whether or not it is for a RST;
812 * the underlying function will use it to retrieve the network
813 * namespace.
814 */
815 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
817 skb_dst_set(buff, dst);
818 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
819 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
821 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
828 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
830 const struct tcphdr *th = tcp_hdr(skb);
831 u32 seq = 0, ack_seq = 0;
832 struct tcp_md5sig_key *key = NULL;
833 #ifdef CONFIG_TCP_MD5SIG
834 const __u8 *hash_location = NULL;
835 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
836 unsigned char newhash[16];
838 struct sock *sk1 = NULL;
845 /* If sk is not NULL, it means we did a successful lookup and the incoming
846 * route had to be correct. prequeue might have dropped our dst.
848 if (!sk && !ipv6_unicast_destination(skb))
851 #ifdef CONFIG_TCP_MD5SIG
852 hash_location = tcp_parse_md5sig_option(th);
853 if (!sk && hash_location) {
855 * active side is lost. Try to find the listening socket through the
856 * source port, and then find the md5 key through the listening socket.
857 * We do not loosen security here:
858 * the incoming packet is checked with the md5 hash of the found key;
859 * no RST is generated if the md5 hash doesn't match.
861 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
862 &tcp_hashinfo, &ipv6h->saddr,
863 th->source, &ipv6h->daddr,
864 ntohs(th->source), tcp_v6_iif(skb));
869 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
873 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
874 if (genhash || memcmp(hash_location, newhash, 16) != 0)
877 key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
882 seq = ntohl(th->ack_seq);
884 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
885 (th->doff << 2);
887 oif = sk ? sk->sk_bound_dev_if : 0;
888 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
890 #ifdef CONFIG_TCP_MD5SIG
899 static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
900 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
901 struct tcp_md5sig_key *key, u8 tclass,
904 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
908 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
910 struct inet_timewait_sock *tw = inet_twsk(sk);
911 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
913 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
914 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
915 tcp_time_stamp + tcptw->tw_ts_offset,
916 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
917 tw->tw_tclass, (tw->tw_flowlabel << 12));
922 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
923 struct request_sock *req)
925 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
926 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
928 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
929 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
930 tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
931 tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
932 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
937 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
939 const struct tcphdr *th = tcp_hdr(skb);
940 struct request_sock *req;
943 /* Find possible connection requests. */
944 req = inet6_csk_search_req(sk, th->source,
945 &ipv6_hdr(skb)->saddr,
946 &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
948 nsk = tcp_check_req(sk, skb, req, false);
952 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
953 &ipv6_hdr(skb)->saddr, th->source,
954 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
958 if (nsk->sk_state != TCP_TIME_WAIT) {
962 inet_twsk_put(inet_twsk(nsk));
966 #ifdef CONFIG_SYN_COOKIES
968 sk = cookie_v6_check(sk, skb);
973 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
975 if (skb->protocol == htons(ETH_P_IP))
976 return tcp_v4_conn_request(sk, skb);
978 if (!ipv6_unicast_destination(skb))
981 return tcp_conn_request(&tcp6_request_sock_ops,
982 &tcp_request_sock_ipv6_ops, sk, skb);
985 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
986 return 0; /* don't send reset */
989 static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
990 struct request_sock *req,
991 struct dst_entry *dst)
993 struct inet_request_sock *ireq;
994 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
995 struct tcp6_sock *newtcp6sk;
996 struct inet_sock *newinet;
997 struct tcp_sock *newtp;
999 #ifdef CONFIG_TCP_MD5SIG
1000 struct tcp_md5sig_key *key;
1004 if (skb->protocol == htons(ETH_P_IP)) {
1009 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1014 newtcp6sk = (struct tcp6_sock *)newsk;
1015 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1017 newinet = inet_sk(newsk);
1018 newnp = inet6_sk(newsk);
1019 newtp = tcp_sk(newsk);
1021 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1023 newnp->saddr = newsk->sk_v6_rcv_saddr;
1025 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1026 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1027 #ifdef CONFIG_TCP_MD5SIG
1028 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1031 newnp->ipv6_ac_list = NULL;
1032 newnp->ipv6_fl_list = NULL;
1033 newnp->pktoptions = NULL;
1035 newnp->mcast_oif = tcp_v6_iif(skb);
1036 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1037 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1039 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1042 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1043 * here, tcp_create_openreq_child now does this for us, see the comment in
1044 * that function for the gory details. -acme
1047 /* This is a tricky place. Until this moment the IPv4 tcp code
1048 worked with the IPv6 icsk.icsk_af_ops.
1051 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1056 ireq = inet_rsk(req);
1058 if (sk_acceptq_is_full(sk))
1062 dst = inet6_csk_route_req(sk, &fl6, req);
1067 newsk = tcp_create_openreq_child(sk, req, skb);
1072 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1073 * count here, tcp_create_openreq_child now does this for us, see the
1074 * comment in that function for the gory details. -acme
1077 newsk->sk_gso_type = SKB_GSO_TCPV6;
1078 __ip6_dst_store(newsk, dst, NULL, NULL);
1079 inet6_sk_rx_dst_set(newsk, skb);
1081 newtcp6sk = (struct tcp6_sock *)newsk;
1082 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1084 newtp = tcp_sk(newsk);
1085 newinet = inet_sk(newsk);
1086 newnp = inet6_sk(newsk);
1088 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1090 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1091 newnp->saddr = ireq->ir_v6_loc_addr;
1092 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1093 newsk->sk_bound_dev_if = ireq->ir_iif;
1095 ip6_set_txhash(newsk);
1097 /* Now IPv6 options...
1099 First: no IPv4 options.
1101 newinet->inet_opt = NULL;
1102 newnp->ipv6_ac_list = NULL;
1103 newnp->ipv6_fl_list = NULL;
1106 newnp->rxopt.all = np->rxopt.all;
1108 /* Clone pktoptions received with SYN */
1109 newnp->pktoptions = NULL;
1110 if (ireq->pktopts) {
1111 newnp->pktoptions = skb_clone(ireq->pktopts,
1112 sk_gfp_atomic(sk, GFP_ATOMIC));
1113 consume_skb(ireq->pktopts);
1114 ireq->pktopts = NULL;
1115 if (newnp->pktoptions)
1116 skb_set_owner_r(newnp->pktoptions, newsk);
1119 newnp->mcast_oif = tcp_v6_iif(skb);
1120 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1121 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
1123 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
1125 /* Clone native IPv6 options from the listening socket (if any).
1127 Yes, keeping a reference count would be much more clever,
1128 but we do one more thing here: reattach optmem
1129 to newsk.
1132 newnp->opt = ipv6_dup_options(newsk, np->opt);
1134 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1136 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1137 newnp->opt->opt_flen);
1139 tcp_ca_openreq_child(newsk, dst);
1141 tcp_sync_mss(newsk, dst_mtu(dst));
1142 newtp->advmss = dst_metric_advmss(dst);
1143 if (tcp_sk(sk)->rx_opt.user_mss &&
1144 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1145 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1147 tcp_initialize_rcv_mss(newsk);
1149 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1150 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1152 #ifdef CONFIG_TCP_MD5SIG
1153 /* Copy over the MD5 key from the original socket */
1154 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
1156 /* We're using one, so create a matching key
1157 * on the newsk structure. If we fail to get
1158 * memory, then we end up not copying the key
1159 * across. Shucks.
1161 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
1162 AF_INET6, key->key, key->keylen,
1163 sk_gfp_atomic(sk, GFP_ATOMIC));
1167 if (__inet_inherit_port(sk, newsk) < 0) {
1168 inet_csk_prepare_forced_close(newsk);
1172 __inet_hash(newsk, NULL);
1177 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1181 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1185 /* The socket must have its spinlock held when we get
1186 * here, unless it is a TCP_LISTEN socket.
1187 *
1188 * We have a potential double-lock case here, so even when
1189 * doing backlog processing we use the BH locking scheme.
1190 * This is because we cannot sleep with the original spinlock
1193 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1195 struct ipv6_pinfo *np = inet6_sk(sk);
1196 struct tcp_sock *tp;
1197 struct sk_buff *opt_skb = NULL;
1199 /* Imagine: socket is IPv6. IPv4 packet arrives,
1200 goes to the IPv4 receive handler and is backlogged.
1201 From the backlog it always goes here. Kerboom...
1202 Fortunately, tcp_rcv_established and rcv_established
1203 handle them correctly, but it is not the case with
1204 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1207 if (skb->protocol == htons(ETH_P_IP))
1208 return tcp_v4_do_rcv(sk, skb);
1210 if (sk_filter(sk, skb))
1214 * socket locking is here for SMP purposes as backlog rcv
1215 * is currently called with bh processing disabled.
1218 /* Do Stevens' IPV6_PKTOPTIONS.
1220 Yes, guys, it is the only place in our code where we
1221 may make it without affecting IPv4.
1222 The rest of the code is protocol independent,
1223 and I do not like the idea of uglifying IPv4.
1225 Actually, the whole idea behind IPV6_PKTOPTIONS
1226 looks not very well thought out. For now we latch the
1227 options received in the last packet, enqueued
1228 by tcp. Feel free to propose a better solution.
1232 opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
1234 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1235 struct dst_entry *dst = sk->sk_rx_dst;
1237 sock_rps_save_rxhash(sk, skb);
1238 sk_mark_napi_id(sk, skb);
1240 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1241 dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
1243 sk->sk_rx_dst = NULL;
1247 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1249 goto ipv6_pktoptions;
1253 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1256 if (sk->sk_state == TCP_LISTEN) {
1257 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1262 * Queue it on the new socket if the new socket is active,
1263 * otherwise we just shortcircuit this and continue with
1264 * the new socket.
1267 sock_rps_save_rxhash(nsk, skb);
1268 sk_mark_napi_id(sk, skb);
1269 if (tcp_child_process(sk, nsk, skb))
1272 __kfree_skb(opt_skb);
1276 sock_rps_save_rxhash(sk, skb);
1278 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1281 goto ipv6_pktoptions;
1285 tcp_v6_send_reset(sk, skb);
1288 __kfree_skb(opt_skb);
1292 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1293 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1298 /* You may ask: what is this?
1300 1. skb was enqueued by tcp.
1301 2. skb is added to tail of read queue, rather than out of order.
1302 3. socket is not in passive state.
1303 4. Finally, it really contains options, which the user wants to receive.
1306 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1307 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1308 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1309 np->mcast_oif = tcp_v6_iif(opt_skb);
1310 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1311 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1312 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
1313 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
1315 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
1316 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
1317 skb_set_owner_r(opt_skb, sk);
1318 opt_skb = xchg(&np->pktoptions, opt_skb);
1320 __kfree_skb(opt_skb);
1321 opt_skb = xchg(&np->pktoptions, NULL);
1329 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1330 const struct tcphdr *th)
1332 /* This is tricky: we move IP6CB at its correct location into
1333 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1334 * _decode_session6() uses IP6CB().
1335 * barrier() makes sure compiler won't play aliasing games.
1337 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1338 sizeof(struct inet6_skb_parm));
1341 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1342 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1343 skb->len - th->doff * 4);
1344 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1345 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1346 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1347 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1348 TCP_SKB_CB(skb)->sacked = 0;
1351 static void tcp_v6_restore_cb(struct sk_buff *skb)
1353 /* We need to move header back to the beginning if xfrm6_policy_check()
1354 * and tcp_v6_fill_cb() are going to be called again.
1356 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1357 sizeof(struct inet6_skb_parm));
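/* Background: IP6CB(skb) aliases skb->cb[0], while header.h6 sits at the
 * tail of struct tcp_skb_cb so TCP's hot fields (seq, end_seq, flags) stay
 * in the first cache line. tcp_v6_fill_cb() therefore relocates the IPv6
 * control block, and tcp_v6_restore_cb() must undo that before
 * xfrm6_policy_check()/tcp_v6_fill_cb() can run on the skb again.
 */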
1360 static int tcp_v6_rcv(struct sk_buff *skb)
1362 const struct tcphdr *th;
1363 const struct ipv6hdr *hdr;
1366 struct net *net = dev_net(skb->dev);
1368 if (skb->pkt_type != PACKET_HOST)
1372 * Count it even if it's bad.
1374 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1376 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1381 if (th->doff < sizeof(struct tcphdr)/4)
1383 if (!pskb_may_pull(skb, th->doff * 4))
1386 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1390 hdr = ipv6_hdr(skb);
1392 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
1398 if (sk->sk_state == TCP_TIME_WAIT)
1401 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1402 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1403 goto discard_and_relse;
1406 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1407 goto discard_and_relse;
1409 tcp_v6_fill_cb(skb, hdr, th);
1411 #ifdef CONFIG_TCP_MD5SIG
1412 if (tcp_v6_inbound_md5_hash(sk, skb))
1413 goto discard_and_relse;
1416 if (sk_filter(sk, skb))
1417 goto discard_and_relse;
1419 sk_incoming_cpu_update(sk);
1422 bh_lock_sock_nested(sk);
1424 if (!sock_owned_by_user(sk)) {
1425 if (!tcp_prequeue(sk, skb))
1426 ret = tcp_v6_do_rcv(sk, skb);
1427 } else if (unlikely(sk_add_backlog(sk, skb,
1428 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1430 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1431 goto discard_and_relse;
1436 return ret ? -1 : 0;
1439 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1442 tcp_v6_fill_cb(skb, hdr, th);
1444 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1446 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1448 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1450 tcp_v6_send_reset(NULL, skb);
1462 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1463 inet_twsk_put(inet_twsk(sk));
1467 tcp_v6_fill_cb(skb, hdr, th);
1469 if (skb->len < (th->doff << 2)) {
1470 inet_twsk_put(inet_twsk(sk));
1473 if (tcp_checksum_complete(skb)) {
1474 inet_twsk_put(inet_twsk(sk));
1478 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1483 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1484 &ipv6_hdr(skb)->saddr, th->source,
1485 &ipv6_hdr(skb)->daddr,
1486 ntohs(th->dest), tcp_v6_iif(skb));
1488 struct inet_timewait_sock *tw = inet_twsk(sk);
1489 inet_twsk_deschedule(tw);
1492 tcp_v6_restore_cb(skb);
1495 /* Fall through to ACK */
1498 tcp_v6_timewait_ack(sk, skb);
1501 tcp_v6_restore_cb(skb);
1503 case TCP_TW_SUCCESS:
1509 static void tcp_v6_early_demux(struct sk_buff *skb)
1511 const struct ipv6hdr *hdr;
1512 const struct tcphdr *th;
1515 if (skb->pkt_type != PACKET_HOST)
1518 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1521 hdr = ipv6_hdr(skb);
1524 if (th->doff < sizeof(struct tcphdr) / 4)
1527 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
1528 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1529 &hdr->saddr, th->source,
1530 &hdr->daddr, ntohs(th->dest),
1534 skb->destructor = sock_edemux;
1535 if (sk_fullsock(sk)) {
1536 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1539 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1541 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1542 skb_dst_set_noref(skb, dst);
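/* Editor's note: on a hit, skb->sk plus the noref dst cached above let
 * ip6_rcv_finish() skip the routing lookup entirely, provided
 * rx_dst_cookie still validates the dst and the packet arrived on the
 * interface it was cached from (the rx_dst_ifindex check).
 */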
1547 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1548 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1549 .twsk_unique = tcp_twsk_unique,
1550 .twsk_destructor = tcp_twsk_destructor,
1553 static const struct inet_connection_sock_af_ops ipv6_specific = {
1554 .queue_xmit = inet6_csk_xmit,
1555 .send_check = tcp_v6_send_check,
1556 .rebuild_header = inet6_sk_rebuild_header,
1557 .sk_rx_dst_set = inet6_sk_rx_dst_set,
1558 .conn_request = tcp_v6_conn_request,
1559 .syn_recv_sock = tcp_v6_syn_recv_sock,
1560 .net_header_len = sizeof(struct ipv6hdr),
1561 .net_frag_header_len = sizeof(struct frag_hdr),
1562 .setsockopt = ipv6_setsockopt,
1563 .getsockopt = ipv6_getsockopt,
1564 .addr2sockaddr = inet6_csk_addr2sockaddr,
1565 .sockaddr_len = sizeof(struct sockaddr_in6),
1566 .bind_conflict = inet6_csk_bind_conflict,
1567 #ifdef CONFIG_COMPAT
1568 .compat_setsockopt = compat_ipv6_setsockopt,
1569 .compat_getsockopt = compat_ipv6_getsockopt,
1571 .mtu_reduced = tcp_v6_mtu_reduced,
1574 #ifdef CONFIG_TCP_MD5SIG
1575 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1576 .md5_lookup = tcp_v6_md5_lookup,
1577 .calc_md5_hash = tcp_v6_md5_hash_skb,
1578 .md5_parse = tcp_v6_parse_md5_keys,
1583 * TCP over IPv4 via INET6 API
1585 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1586 .queue_xmit = ip_queue_xmit,
1587 .send_check = tcp_v4_send_check,
1588 .rebuild_header = inet_sk_rebuild_header,
1589 .sk_rx_dst_set = inet_sk_rx_dst_set,
1590 .conn_request = tcp_v6_conn_request,
1591 .syn_recv_sock = tcp_v6_syn_recv_sock,
1592 .net_header_len = sizeof(struct iphdr),
1593 .setsockopt = ipv6_setsockopt,
1594 .getsockopt = ipv6_getsockopt,
1595 .addr2sockaddr = inet6_csk_addr2sockaddr,
1596 .sockaddr_len = sizeof(struct sockaddr_in6),
1597 .bind_conflict = inet6_csk_bind_conflict,
1598 #ifdef CONFIG_COMPAT
1599 .compat_setsockopt = compat_ipv6_setsockopt,
1600 .compat_getsockopt = compat_ipv6_getsockopt,
1602 .mtu_reduced = tcp_v4_mtu_reduced,
1605 #ifdef CONFIG_TCP_MD5SIG
1606 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1607 .md5_lookup = tcp_v4_md5_lookup,
1608 .calc_md5_hash = tcp_v4_md5_hash_skb,
1609 .md5_parse = tcp_v6_parse_md5_keys,
1613 /* NOTE: A lot of things are set to zero explicitly by the call to
1614 * sk_alloc(), so they need not be done here.
1616 static int tcp_v6_init_sock(struct sock *sk)
1618 struct inet_connection_sock *icsk = inet_csk(sk);
1622 icsk->icsk_af_ops = &ipv6_specific;
1624 #ifdef CONFIG_TCP_MD5SIG
1625 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1631 static void tcp_v6_destroy_sock(struct sock *sk)
1633 tcp_v4_destroy_sock(sk);
1634 inet6_destroy_sock(sk);
1637 #ifdef CONFIG_PROC_FS
1638 /* Proc filesystem TCPv6 sock list dumping. */
1639 static void get_openreq6(struct seq_file *seq,
1640 struct request_sock *req, int i, kuid_t uid)
1642 long ttd = req->rsk_timer.expires - jiffies;
1643 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1644 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1650 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1651 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1653 src->s6_addr32[0], src->s6_addr32[1],
1654 src->s6_addr32[2], src->s6_addr32[3],
1655 inet_rsk(req)->ir_num,
1656 dest->s6_addr32[0], dest->s6_addr32[1],
1657 dest->s6_addr32[2], dest->s6_addr32[3],
1658 ntohs(inet_rsk(req)->ir_rmt_port),
1660 0, 0, /* could print option size, but that is af dependent. */
1661 1, /* timers active (only the expire timer) */
1662 jiffies_to_clock_t(ttd),
1664 from_kuid_munged(seq_user_ns(seq), uid),
1665 0, /* non standard timer */
1666 0, /* open_requests have no inode */
1670 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1672 const struct in6_addr *dest, *src;
1675 unsigned long timer_expires;
1676 const struct inet_sock *inet = inet_sk(sp);
1677 const struct tcp_sock *tp = tcp_sk(sp);
1678 const struct inet_connection_sock *icsk = inet_csk(sp);
1679 struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
1681 dest = &sp->sk_v6_daddr;
1682 src = &sp->sk_v6_rcv_saddr;
1683 destp = ntohs(inet->inet_dport);
1684 srcp = ntohs(inet->inet_sport);
1686 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1688 timer_expires = icsk->icsk_timeout;
1689 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1691 timer_expires = icsk->icsk_timeout;
1692 } else if (timer_pending(&sp->sk_timer)) {
1694 timer_expires = sp->sk_timer.expires;
1697 timer_expires = jiffies;
1701 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1702 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1704 src->s6_addr32[0], src->s6_addr32[1],
1705 src->s6_addr32[2], src->s6_addr32[3], srcp,
1706 dest->s6_addr32[0], dest->s6_addr32[1],
1707 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1709 tp->write_seq - tp->snd_una,
1710 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1712 jiffies_delta_to_clock_t(timer_expires - jiffies),
1713 icsk->icsk_retransmits,
1714 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1715 icsk->icsk_probes_out,
1717 atomic_read(&sp->sk_refcnt), sp,
1718 jiffies_to_clock_t(icsk->icsk_rto),
1719 jiffies_to_clock_t(icsk->icsk_ack.ato),
1720 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1722 sp->sk_state == TCP_LISTEN ?
1723 (fastopenq ? fastopenq->max_qlen : 0) :
1724 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
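/* Illustrative /proc/net/tcp6 entry produced by the format above (values
 * hypothetical, line wrapped): a listener on ::1 port 22 prints as
 *
 *   0: 00000000000000000000000001000000:0016
 *      00000000000000000000000000000000:0000 0A ...
 *
 * Note the per-word %08X address encoding, so ::1 shows its final word as
 * 01000000 on little-endian hosts; state 0A is TCP_LISTEN.
 */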
1728 static void get_timewait6_sock(struct seq_file *seq,
1729 struct inet_timewait_sock *tw, int i)
1731 long delta = tw->tw_timer.expires - jiffies;
1732 const struct in6_addr *dest, *src;
1735 dest = &tw->tw_v6_daddr;
1736 src = &tw->tw_v6_rcv_saddr;
1737 destp = ntohs(tw->tw_dport);
1738 srcp = ntohs(tw->tw_sport);
1741 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1742 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1744 src->s6_addr32[0], src->s6_addr32[1],
1745 src->s6_addr32[2], src->s6_addr32[3], srcp,
1746 dest->s6_addr32[0], dest->s6_addr32[1],
1747 dest->s6_addr32[2], dest->s6_addr32[3], destp,
1748 tw->tw_substate, 0, 0,
1749 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1750 atomic_read(&tw->tw_refcnt), tw);
1753 static int tcp6_seq_show(struct seq_file *seq, void *v)
1755 struct tcp_iter_state *st;
1756 struct sock *sk = v;
1758 if (v == SEQ_START_TOKEN) {
1763 "st tx_queue rx_queue tr tm->when retrnsmt"
1764 " uid timeout inode\n");
1769 switch (st->state) {
1770 case TCP_SEQ_STATE_LISTENING:
1771 case TCP_SEQ_STATE_ESTABLISHED:
1772 if (sk->sk_state == TCP_TIME_WAIT)
1773 get_timewait6_sock(seq, v, st->num);
1775 get_tcp6_sock(seq, v, st->num);
1777 case TCP_SEQ_STATE_OPENREQ:
1778 get_openreq6(seq, v, st->num, st->uid);
1785 static const struct file_operations tcp6_afinfo_seq_fops = {
1786 .owner = THIS_MODULE,
1787 .open = tcp_seq_open,
1789 .llseek = seq_lseek,
1790 .release = seq_release_net
1793 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1796 .seq_fops = &tcp6_afinfo_seq_fops,
1798 .show = tcp6_seq_show,
1802 int __net_init tcp6_proc_init(struct net *net)
1804 return tcp_proc_register(net, &tcp6_seq_afinfo);
1807 void tcp6_proc_exit(struct net *net)
1809 tcp_proc_unregister(net, &tcp6_seq_afinfo);
1813 static void tcp_v6_clear_sk(struct sock *sk, int size)
1815 struct inet_sock *inet = inet_sk(sk);
1817 /* we do not want to clear pinet6 field, because of RCU lookups */
1818 sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1820 size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1821 memset(&inet->pinet6 + 1, 0, size);
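/* Editor's note: the two-step zeroing above wipes everything before and
 * after pinet6 but leaves the pointer itself intact, so concurrent RCU
 * lookups (these sockets use SLAB_DESTROY_BY_RCU, see tcpv6_prot below)
 * never observe a NULL pinet6 while a socket is being recycled.
 */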
1824 struct proto tcpv6_prot = {
1826 .owner = THIS_MODULE,
1828 .connect = tcp_v6_connect,
1829 .disconnect = tcp_disconnect,
1830 .accept = inet_csk_accept,
1832 .init = tcp_v6_init_sock,
1833 .destroy = tcp_v6_destroy_sock,
1834 .shutdown = tcp_shutdown,
1835 .setsockopt = tcp_setsockopt,
1836 .getsockopt = tcp_getsockopt,
1837 .recvmsg = tcp_recvmsg,
1838 .sendmsg = tcp_sendmsg,
1839 .sendpage = tcp_sendpage,
1840 .backlog_rcv = tcp_v6_do_rcv,
1841 .release_cb = tcp_release_cb,
1843 .unhash = inet_unhash,
1844 .get_port = inet_csk_get_port,
1845 .enter_memory_pressure = tcp_enter_memory_pressure,
1846 .stream_memory_free = tcp_stream_memory_free,
1847 .sockets_allocated = &tcp_sockets_allocated,
1848 .memory_allocated = &tcp_memory_allocated,
1849 .memory_pressure = &tcp_memory_pressure,
1850 .orphan_count = &tcp_orphan_count,
1851 .sysctl_mem = sysctl_tcp_mem,
1852 .sysctl_wmem = sysctl_tcp_wmem,
1853 .sysctl_rmem = sysctl_tcp_rmem,
1854 .max_header = MAX_TCP_HEADER,
1855 .obj_size = sizeof(struct tcp6_sock),
1856 .slab_flags = SLAB_DESTROY_BY_RCU,
1857 .twsk_prot = &tcp6_timewait_sock_ops,
1858 .rsk_prot = &tcp6_request_sock_ops,
1859 .h.hashinfo = &tcp_hashinfo,
1860 .no_autobind = true,
1861 #ifdef CONFIG_COMPAT
1862 .compat_setsockopt = compat_tcp_setsockopt,
1863 .compat_getsockopt = compat_tcp_getsockopt,
1865 #ifdef CONFIG_MEMCG_KMEM
1866 .proto_cgroup = tcp_proto_cgroup,
1868 .clear_sk = tcp_v6_clear_sk,
1871 static const struct inet6_protocol tcpv6_protocol = {
1872 .early_demux = tcp_v6_early_demux,
1873 .handler = tcp_v6_rcv,
1874 .err_handler = tcp_v6_err,
1875 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1878 static struct inet_protosw tcpv6_protosw = {
1879 .type = SOCK_STREAM,
1880 .protocol = IPPROTO_TCP,
1881 .prot = &tcpv6_prot,
1882 .ops = &inet6_stream_ops,
1883 .flags = INET_PROTOSW_PERMANENT |
1887 static int __net_init tcpv6_net_init(struct net *net)
1889 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1890 SOCK_RAW, IPPROTO_TCP, net);
1893 static void __net_exit tcpv6_net_exit(struct net *net)
1895 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1898 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1900 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1903 static struct pernet_operations tcpv6_net_ops = {
1904 .init = tcpv6_net_init,
1905 .exit = tcpv6_net_exit,
1906 .exit_batch = tcpv6_net_exit_batch,
1909 int __init tcpv6_init(void)
1913 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1917 /* register inet6 protocol */
1918 ret = inet6_register_protosw(&tcpv6_protosw);
1920 goto out_tcpv6_protocol;
1922 ret = register_pernet_subsys(&tcpv6_net_ops);
1924 goto out_tcpv6_protosw;
1929 inet6_unregister_protosw(&tcpv6_protosw);
1931 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
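/* Editor's note: the error unwind is LIFO; a protosw registration failure
 * removes only the protocol handler, a pernet failure removes both, and
 * tcpv6_exit() below tears the registrations down in exactly the reverse
 * order of tcpv6_init().
 */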
1935 void tcpv6_exit(void)
1937 unregister_pernet_subsys(&tcpv6_net_ops);
1938 inet6_unregister_protosw(&tcpv6_protosw);
1939 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);