/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					to a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
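
/*
 * Illustrative userspace sketch (not part of this file and not built):
 * how the IPV6_V6ONLY option mentioned above is toggled by an
 * application.  With v6only off (the default unless net.ipv6.bindv6only
 * is set), a single AF_INET6 listener also accepts IPv4 peers as
 * v4-mapped addresses; with it on, the port can be shared with a
 * separate AF_INET socket.
 */
#if 0
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int make_v6only_listener(unsigned short port)
{
	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
				  .sin6_port = htons(port) };
	int on = 1;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* Restrict this socket to IPv6 traffic only. */
	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) || listen(fd, 128)) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif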
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		dst_hold(dst);
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		if (rt->rt6i_node)
			inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
	}
}
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}
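
/* The initial sequence number below is derived from the connection
 * 4-tuple plus a boot-time secret and a clock (in the spirit of
 * RFC 6528), making ISNs hard to predict for an off-path attacker.
 */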
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
		ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
				       &sk->sk_v6_rcv_saddr);

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	ip6_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
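
/*
 * Illustrative userspace sketch (not part of this file and not built):
 * connecting an AF_INET6 socket to a v4-mapped address such as
 * ::ffff:192.0.2.1 exercises the IPV6_ADDR_MAPPED branch above, which
 * re-points icsk_af_ops at ipv6_mapped and hands the actual connect to
 * tcp_v4_connect().
 */
#if 0
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int connect_v4_mapped(void)
{
	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
				  .sin6_port = htons(80) };
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	/* 192.0.2.1 is a documentation address (RFC 5737). */
	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a.sin6_addr);
	return fd < 0 ? -1 : connect(fd, (struct sockaddr *)&a, sizeof(a));
}
#endif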
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be the ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always <576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		/* Note : We use inet6_iif() here, not tcp_v6_iif() */
		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted, it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && (ireq->pktopts != NULL))
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
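
/*
 * Illustrative userspace sketch (not part of this file and not built):
 * installing a TCP-MD5 (RFC 2385) key for a peer.  The request reaches
 * tcp_v6_parse_md5_keys() above via the TCP_MD5SIG socket option; this
 * assumes struct tcp_md5sig is exposed through <netinet/tcp.h>.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int install_md5_key(int fd, const struct sockaddr_in6 *peer,
			   const char *secret)
{
	struct tcp_md5sig md5;

	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	md5.tcpm_keylen = strlen(secret);
	memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif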
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
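
/*
 * For reference, a sketch of the block hashed above (tcp6_pseudohdr,
 * as defined in net/tcp.h); it mirrors the RFC 2460 upper-layer
 * pseudo-header used for checksums:
 */
#if 0
struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};
#endif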
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &sk->sk_v6_daddr;
	} else if (req) {
		saddr = &inet_rsk(req)->ir_v6_loc_addr;
		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int __tcp_v6_inbound_md5_hash(struct sock *sk,
				     const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}

static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	int ret;

	rcu_read_lock();
	ret = __tcp_v6_inbound_md5_hash(sk, skb);
	rcu_read_unlock();

	return ret;
}
#endif
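
/* Summary of the policy implemented by __tcp_v6_inbound_md5_hash():
 *
 *	key configured	MD5 option in segment	result
 *	no		no			accept
 *	yes		no			drop (TCPMD5NOTFOUND)
 *	no		yes			drop (TCPMD5UNEXPECTED)
 *	yes		yes			accept iff digest matches
 */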
static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	ireq->ir_iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req);
}
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
};
static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}
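
	/* The first word above packs one option byte each, per the
	 * RFC 793 option layout: NOP (1) | NOP (1) | kind (8) |
	 * length (10), followed by the 32-bit TSval and TSecr.
	 */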
#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup_flow even when it is for an RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the MD5 key through
		 * the listening socket. We do not loosen security here:
		 * the incoming packet is checked against the MD5 hash of the
		 * found key, and no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, (tw->tw_flowlabel << 12));

	inet_twsk_put(tw);
}
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
					 tcp_v6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send a reset */
}
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newsk->sk_v6_rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = tcp_v6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place: until this moment the IPv4 code
		 * worked with the IPv6 icsk.icsk_af_ops.
		 * Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	ip6_set_txhash(newsk);

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach optmem to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may make it without affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   the options received in the last packet enqueued
	   by tcp. Feel free to propose a better solution.
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(sk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to the tail of the read queue, not out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which the user wants to
	      receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure the compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
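	/* SYN and FIN each consume one unit of sequence space, so below
	 * end_seq = seq + syn + fin + payload_len, where payload_len is
	 * skb->len - th->doff * 4.
	 */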
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
}
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
				inet6_iif(skb));
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_incoming_cpu_update(sk);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (skb->len < (th->doff<<2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
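
/* Early demux runs before the routing decision: when an established
 * socket already holds a validated rx_dst for the incoming ifindex,
 * the per-packet route lookup can be skipped entirely.
 */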
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   sp->sk_state == TCP_LISTEN ?
			(fastopenq ? fastopenq->max_qlen : 0) :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait6_sock(seq, v, st->num);
		else
			get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	}
out:
	return 0;
}
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}