/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support the IPV6_V6ONLY socket option,
 *	Alexey Kuznetsov		which allows both IPv4 and IPv6 sockets
 *					to bind to a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	const struct rt6_info *rt = (const struct rt6_info *)dst;

	dst_hold(dst);
	sk->sk_rx_dst = dst;
	inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	if (rt->rt6i_node)
		inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
}
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
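
/* The initial sequence number mixes the connection 4-tuple with a
 * per-boot secret (in the spirit of RFC 6528), so an off-path attacker
 * cannot predict the ISN of a new connection.  Conceptually:
 *
 *	isn = hash(secret, saddr, daddr, sport, dport) + clock;
 *
 * where "clock" advances over time; the exact hash function and clock
 * granularity are implementation details of
 * secure_tcpv6_sequence_number().
 */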
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If an interface was set while binding, the
			 * indices must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connecting to a link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
		ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
				       &sk->sk_v6_rcv_saddr);

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
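
/* Illustrative userspace view of the path above (example only, the
 * addresses and port are hypothetical): an AF_INET6 socket reaches
 * tcp_v6_connect() through the ordinary connect() call, and a
 * v4-mapped destination is silently re-routed via tcp_v4_connect():
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
 *				    .sin6_port   = htons(80) };
 *	inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);
 *	// "::ffff:192.0.2.1" would instead take the IPV6_ADDR_MAPPED
 *	// branch, unless IPV6_V6ONLY is set on the socket.
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 */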
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	struct request_sock *fastopen;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
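
/* The between(seq, snd_una, snd_nxt) check above follows the advice of
 * RFC 5927 ("ICMP Attacks against TCP"): an ICMPv6 error is honoured
 * only if the sequence number it embeds falls inside the current send
 * window, which makes blind, off-path ICMP attacks against established
 * connections much harder.
 */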
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && (ireq->pktopts != NULL))
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
		if (!tcp_rsk(req)->snt_synack && !err)
			tcp_rsk(req)->snt_synack = tcp_time_stamp;
	}

done:
	return err;
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
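
/* Illustrative userspace counterpart of tcp_v6_parse_md5_keys() above
 * (example only; the peer address and key are hypothetical) --
 * installing an RFC 2385 signature key for one peer:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *a = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *	a->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::2", &a->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key, mirroring the tcp_md5_do_del()
 * branch above.
 */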
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
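
/* The block hashed above is the IPv6 pseudo-header of RFC 2460 §8.1:
 * 16 bytes source address, 16 bytes destination address, a 4-byte
 * upper-layer packet length, then 3 zero bytes and the next-header
 * value (IPPROTO_TCP), here packed as one big-endian 32-bit word.
 * These are the same bytes that feed the TCP checksum; for MD5 they
 * simply form the first input block of the signature.
 */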
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &sk->sk_v6_daddr;
	} else if (req) {
		saddr = &inet_rsk(req)->ir_v6_loc_addr;
		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}
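
/* Note that both asymmetric cases above are dropped: a segment missing
 * the expected MD5 option and a segment carrying an option we have no
 * key for.  RFC 2385 requires every segment of a protected connection
 * to be signed, so there is no mixed mode.
 */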
static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	ireq->ir_iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = inet6_iif(skb);

	if (!TCP_SKB_CB(skb)->when &&
	    (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req);
}
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
};
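
/* These two tables are the IPv6 instantiation of the af-independent
 * request_sock machinery: tcp_conn_request() and friends only call
 * through these hooks, so the SYN/SYN-ACK state machine stays
 * protocol-neutral while routing, hashing, ISN generation and MD5
 * signing remain family-specific.
 */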
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 tsval, u32 tsecr, int oif,
				 struct tcp_md5sig_key *key, int rst, u8 tclass,
				 u32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = inet6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass the control socket to ip6_dst_lookup_flow, whether the reply
	 * is a RST or an ACK; the underlying function uses it to retrieve
	 * the network namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We are not loosening security here:
		 * the incoming packet is checked against the md5 hash of the
		 * found key, and no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    u32 label)
{
	tcp_v6_send_response(skb, seq, ack, win, tsval, tsecr, oif, key, 0, tclass,
			     label);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, (tw->tw_flowlabel << 12));

	inet_twsk_put(tw);
}
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rcv_wnd, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return 0; /* don't send reset */
}
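
/* A SYN can arrive here with skb->protocol == ETH_P_IP because an
 * AF_INET6 listener without IPV6_V6ONLY also accepts IPv4 connections
 * (seen by userspace as v4-mapped addresses); those are simply handed
 * back to the IPv4 code.
 */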
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newsk->sk_v6_rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place. Until this moment the IPv4 tcp
		   worked with the IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much cleverer, but it
	   would require one more thing: reattaching optmem to newsk.
	 */
	if (np->opt)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: the socket is IPv6. An IPv4 packet arrives,
	   goes to the IPv4 receive handler and is backlogged.
	   From the backlog it always comes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but that is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   can do this without affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   the options received in the last packet enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket.
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to the tail of the read queue, not out of order.
	   3. The socket is not in a passive state.
	   4. Finally, it really contains options the user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
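
/* Illustrative userspace counterpart of the latching above (example
 * only; option names follow the RFC 2292-era API). On a TCP socket the
 * kernel cannot attach ancillary data to recvmsg(), so the last
 * packet's options are latched in np->pktoptions and read back as a
 * control-message buffer:
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
 *	...
 *	char cbuf[256];
 *	socklen_t clen = sizeof(cbuf);
 *	getsockopt(fd, IPPROTO_IPV6, IPV6_2292PKTOPTIONS, cbuf, &clen);
 *	// cbuf now holds cmsg records (e.g. IPV6_PKTINFO) for parsing.
 */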
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	sk_mark_napi_id(sk, skb);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;

			if (dst)
				dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
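
/* Early demux runs before routing: if an established socket already
 * matches the incoming 4-tuple, its cached receive dst can be attached
 * to the skb and the per-packet route lookup skipped entirely.  This is
 * purely an optimisation; a cookie or ifindex mismatch simply falls
 * back to the normal input path.
 */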
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,    /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0,  /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   sp->sk_state == TCP_LISTEN ?
			(fastopenq ? fastopenq->max_qlen : 0) :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait6_sock(seq, v, st->num);
		else
			get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	}
out:
	return 0;
}
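
/* Illustrative /proc/net/tcp6 output (values are hypothetical):
 *
 *   sl  local_address                         remote_address ...
 *    0: 00000000000000000000000000000000:0016 00000000000000000000000000000000:0000 0A ...
 *
 * Each address is the four raw s6_addr32 words printed as hex, so it
 * appears in network byte order rather than RFC 5952 text form; the
 * "st" column is the numeric sk_state (0x0A == TCP_LISTEN).
 */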
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear the pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}
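
/* tcpv6_prot below uses SLAB_DESTROY_BY_RCU, so a socket's memory may
 * be reused for a new socket while a concurrent RCU lookup is still
 * examining the old one.  pinet6 must therefore stay valid across such
 * reuse, which is why the clearing above deliberately skips that one
 * field.
 */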
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v6_mtu_reduced,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}