/*
 *      TCP over IPv6
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <[email protected]>
 *
 *      Based on:
 *      linux/net/ipv4/tcp.c
 *      linux/net/ipv4/tcp_input.c
 *      linux/net/ipv4/tcp_output.c
 *
 *      Fixes:
 *      Hideaki YOSHIFUJI       :       sin6_scope_id support
 *      YOSHIFUJI Hideaki @USAGI and:   Support the IPV6_V6ONLY socket option,
 *      Alexey Kuznetsov                which allows both IPv4 and IPv6 sockets
 *                                      to bind to a single port at the same time.
 *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static void     tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void     tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                                      struct request_sock *req);

static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
                                                   const struct in6_addr *addr)
{
        return NULL;
}
#endif

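/* Cache the (already validated) receive dst on the socket so the
 * established-state fast path can skip a route lookup; the incoming
 * ifindex and the rt6 cookie are stored alongside so a stale entry
 * can be detected and dropped later.
 */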
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);

        if (dst && dst_hold_safe(dst)) {
                const struct rt6_info *rt = (const struct rt6_info *)dst;

                sk->sk_rx_dst = dst;
                inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
                inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
        }
}

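/* Derive the initial sequence number for a connection from the
 * address/port 4-tuple via a keyed hash, so ISNs are hard to predict
 * off-path (RFC 6528 style).
 */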
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
        return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
                                            ipv6_hdr(skb)->saddr.s6_addr32,
                                            tcp_hdr(skb)->dest,
                                            tcp_hdr(skb)->source);
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                          int addr_len)
{
        struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
        struct inet_sock *inet = inet_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p, final;
        struct ipv6_txoptions *opt;
        struct flowi6 fl6;
        struct dst_entry *dst;
        int addr_type;
        int err;

        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;

        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;

        memset(&fl6, 0, sizeof(fl6));

        if (np->sndflow) {
                fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
                IP6_ECN_flow_init(fl6.flowlabel);
                if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
                        struct ip6_flowlabel *flowlabel;

                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                        if (!flowlabel)
                                return -EINVAL;
                        fl6_sock_release(flowlabel);
                }
        }

        /*
         *      connect() to INADDR_ANY means loopback (BSD'ism).
         */

        if (ipv6_addr_any(&usin->sin6_addr))
                usin->sin6_addr.s6_addr[15] = 0x1;

        addr_type = ipv6_addr_type(&usin->sin6_addr);

        if (addr_type & IPV6_ADDR_MULTICAST)
                return -ENETUNREACH;

        if (addr_type & IPV6_ADDR_LINKLOCAL) {
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    usin->sin6_scope_id) {
                        /* If an interface was set while binding, the
                         * indices must coincide.
                         */
                        if (sk->sk_bound_dev_if &&
                            sk->sk_bound_dev_if != usin->sin6_scope_id)
                                return -EINVAL;

                        sk->sk_bound_dev_if = usin->sin6_scope_id;
                }

                /* Connecting to a link-local address requires an interface. */
                if (!sk->sk_bound_dev_if)
                        return -EINVAL;
        }

        if (tp->rx_opt.ts_recent_stamp &&
            !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
                tp->rx_opt.ts_recent = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                tp->write_seq = 0;
        }

        sk->sk_v6_daddr = usin->sin6_addr;
        np->flow_label = fl6.flowlabel;

        /*
         *      TCP over IPv4
         */

        if (addr_type == IPV6_ADDR_MAPPED) {
                u32 exthdrlen = icsk->icsk_ext_hdr_len;
                struct sockaddr_in sin;

                SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

                if (__ipv6_only_sock(sk))
                        return -ENETUNREACH;

                sin.sin_family = AF_INET;
                sin.sin_port = usin->sin6_port;
                sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

                icsk->icsk_af_ops = &ipv6_mapped;
                sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

                err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

                if (err) {
                        icsk->icsk_ext_hdr_len = exthdrlen;
                        icsk->icsk_af_ops = &ipv6_specific;
                        sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                        tp->af_specific = &tcp_sock_ipv6_specific;
#endif
                        goto failure;
                }
                np->saddr = sk->sk_v6_rcv_saddr;

                return err;
        }

        if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
                saddr = &sk->sk_v6_rcv_saddr;

        fl6.flowi6_proto = IPPROTO_TCP;
        fl6.daddr = sk->sk_v6_daddr;
        fl6.saddr = saddr ? *saddr : np->saddr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.flowi6_mark = sk->sk_mark;
        fl6.fl6_dport = usin->sin6_port;
        fl6.fl6_sport = inet->inet_sport;
        fl6.flowi6_uid = sk->sk_uid;

        opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
        final_p = fl6_update_dst(&fl6, opt, &final);

        security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

        dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
        if (IS_ERR(dst)) {
                err = PTR_ERR(dst);
                goto failure;
        }

        if (!saddr) {
                saddr = &fl6.saddr;
                sk->sk_v6_rcv_saddr = *saddr;
        }

        /* set the source address */
        np->saddr = *saddr;
        inet->inet_rcv_saddr = LOOPBACK4_IPV6;

        sk->sk_gso_type = SKB_GSO_TCPV6;
        ip6_dst_store(sk, dst, NULL, NULL);

        if (tcp_death_row.sysctl_tw_recycle &&
            !tp->rx_opt.ts_recent_stamp &&
            ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
                tcp_fetch_timewait_stamp(sk, dst);

        icsk->icsk_ext_hdr_len = 0;
        if (opt)
                icsk->icsk_ext_hdr_len = opt->opt_flen +
                                         opt->opt_nflen;

        tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

        inet->inet_dport = usin->sin6_port;

        tcp_set_state(sk, TCP_SYN_SENT);
        err = inet6_hash_connect(&tcp_death_row, sk);
        if (err)
                goto late_failure;

        sk_set_txhash(sk);

        if (!tp->write_seq && likely(!tp->repair))
                tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
                                                             sk->sk_v6_daddr.s6_addr32,
                                                             inet->inet_sport,
                                                             inet->inet_dport);

        err = tcp_connect(sk);
        if (err)
                goto late_failure;

        return 0;

late_failure:
        tcp_set_state(sk, TCP_CLOSE);
        __sk_dst_reset(sk);
failure:
        inet->inet_dport = 0;
        sk->sk_route_caps = 0;
        return err;
}

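/* React to an ICMPv6 "packet too big" indication: refresh the cached
 * path MTU and, if our current MSS no longer fits, shrink it and
 * retransmit what is now oversized. Runs either directly from the
 * ICMPv6 error handler or deferred via TCP_MTU_REDUCED_DEFERRED when
 * the socket was owned by the user.
 */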
static void tcp_v6_mtu_reduced(struct sock *sk)
{
        struct dst_entry *dst;

        if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
                return;

        dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
        if (!dst)
                return;

        if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
                tcp_sync_mss(sk, dst_mtu(dst));
                tcp_simple_retransmit(sk);
        }
}

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                u8 type, u8 code, int offset, __be32 info)
{
        const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
        const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
        struct net *net = dev_net(skb->dev);
        struct request_sock *fastopen;
        struct ipv6_pinfo *np;
        struct tcp_sock *tp;
        __u32 seq, snd_una;
        struct sock *sk;
        bool fatal;
        int err;

        sk = __inet6_lookup_established(net, &tcp_hashinfo,
                                        &hdr->daddr, th->dest,
                                        &hdr->saddr, ntohs(th->source),
                                        skb->dev->ifindex);

        if (!sk) {
                __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
                                  ICMP6_MIB_INERRORS);
                return;
        }

        if (sk->sk_state == TCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return;
        }
        seq = ntohl(th->seq);
        fatal = icmpv6_err_convert(type, code, &err);
        if (sk->sk_state == TCP_NEW_SYN_RECV)
                return tcp_req_err(sk, seq, fatal);

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
                __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == TCP_CLOSE)
                goto out;

        if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
                __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
                goto out;
        }

        tp = tcp_sk(sk);
        /* XXX (TFO) - tp->snd_una should be the ISN (tcp_create_openreq_child()) */
        fastopen = tp->fastopen_rsk;
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, snd_una, tp->snd_nxt)) {
                __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        np = inet6_sk(sk);

        if (type == NDISC_REDIRECT) {
                struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

                if (dst)
                        dst->ops->redirect(dst, sk, skb);
                goto out;
        }

        if (type == ICMPV6_PKT_TOOBIG) {
                /* We are not interested in TCP_LISTEN and open_requests
                 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
                 * they should go through unfragmented).
                 */
                if (sk->sk_state == TCP_LISTEN)
                        goto out;

                if (!ip6_sk_accept_pmtu(sk))
                        goto out;

                tp->mtu_info = ntohl(info);
                if (!sock_owned_by_user(sk))
                        tcp_v6_mtu_reduced(sk);
                else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
                                           &tp->tsq_flags))
                        sock_hold(sk);
                goto out;
        }

        /* Might be for a request_sock */
        switch (sk->sk_state) {
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:
                /* Only in fast or simultaneous open. If a fast open socket
                 * is already accepted, it is treated as a connected one below.
                 */
                if (fastopen && !fastopen->sk)
                        break;

                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;
                        sk->sk_error_report(sk);        /* Wake people up to see the error (see connect in sock.c) */

                        tcp_done(sk);
                } else
                        sk->sk_err_soft = err;
                goto out;
        }

        if (!sock_owned_by_user(sk) && np->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else
                sk->sk_err_soft = err;

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

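/* Transmit a SYN-ACK for the given request: route it if the caller did
 * not pass a dst, build the segment with tcp_make_synack(), fill in the
 * checksum and flow label, and send it via ip6_xmit() with any IPv6 tx
 * options attached.
 */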
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                              struct flowi *fl,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
                              enum tcp_synack_type synack_type)
{
        struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_txoptions *opt;
        struct flowi6 *fl6 = &fl->u.ip6;
        struct sk_buff *skb;
        int err = -ENOMEM;

        /* First, grab a route. */
        if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
                                               IPPROTO_TCP)) == NULL)
                goto done;

        skb = tcp_make_synack(sk, dst, req, foc, synack_type);

        if (skb) {
                __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
                                    &ireq->ir_v6_rmt_addr);

                fl6->daddr = ireq->ir_v6_rmt_addr;
                if (np->repflow && ireq->pktopts)
                        fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

                rcu_read_lock();
                opt = ireq->ipv6_opt;
                if (!opt)
                        opt = rcu_dereference(np->opt);
                err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
                rcu_read_unlock();
                err = net_xmit_eval(err);
        }

done:
        return err;
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
        kfree(inet_rsk(req)->ipv6_opt);
        kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
                                                   const struct in6_addr *addr)
{
        return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
                                                const struct sock *addr_sk)
{
        return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
                                 int optlen)
{
        struct tcp_md5sig cmd;
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

        if (optlen < sizeof(cmd))
                return -EINVAL;

        if (copy_from_user(&cmd, optval, sizeof(cmd)))
                return -EFAULT;

        if (sin6->sin6_family != AF_INET6)
                return -EINVAL;

        if (!cmd.tcpm_keylen) {
                if (ipv6_addr_v4mapped(&sin6->sin6_addr))
                        return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
                                              AF_INET);
                return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
                                      AF_INET6);
        }

        if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
                return -EINVAL;

        if (ipv6_addr_v4mapped(&sin6->sin6_addr))
                return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
                                      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

        return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
                              AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

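/* Feed the TCP-MD5 signed headers into the hash: the IPv6 pseudo-header
 * (saddr, daddr, length, next header, per RFC 2460) followed by the TCP
 * header with its checksum field zeroed, as RFC 2385 requires.
 */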
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
                                   const struct in6_addr *daddr,
                                   const struct in6_addr *saddr,
                                   const struct tcphdr *th, int nbytes)
{
        struct tcp6_pseudohdr *bp;
        struct scatterlist sg;
        struct tcphdr *_th;

        bp = hp->scratch;
        /* 1. TCP pseudo-header (RFC 2460) */
        bp->saddr = *saddr;
        bp->daddr = *daddr;
        bp->protocol = cpu_to_be32(IPPROTO_TCP);
        bp->len = cpu_to_be32(nbytes);

        _th = (struct tcphdr *)(bp + 1);
        memcpy(_th, th, sizeof(*th));
        _th->check = 0;

        sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
        ahash_request_set_crypt(hp->md5_req, &sg, NULL,
                                sizeof(*bp) + sizeof(*th));
        return crypto_ahash_update(hp->md5_req);
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                               const struct in6_addr *daddr, struct in6_addr *saddr,
                               const struct tcphdr *th)
{
        struct tcp_md5sig_pool *hp;
        struct ahash_request *req;

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        req = hp->md5_req;

        if (crypto_ahash_init(req))
                goto clear_hash;
        if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        ahash_request_set_crypt(req, NULL, md5_hash, 0);
        if (crypto_ahash_final(req))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash,
                               const struct tcp_md5sig_key *key,
                               const struct sock *sk,
                               const struct sk_buff *skb)
{
        const struct in6_addr *saddr, *daddr;
        struct tcp_md5sig_pool *hp;
        struct ahash_request *req;
        const struct tcphdr *th = tcp_hdr(skb);

        if (sk) { /* valid for establish/request sockets */
                saddr = &sk->sk_v6_rcv_saddr;
                daddr = &sk->sk_v6_daddr;
        } else {
                const struct ipv6hdr *ip6h = ipv6_hdr(skb);

                saddr = &ip6h->saddr;
                daddr = &ip6h->daddr;
        }

        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
        req = hp->md5_req;

        if (crypto_ahash_init(req))
                goto clear_hash;

        if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
                goto clear_hash;
        if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
        ahash_request_set_crypt(req, NULL, md5_hash, 0);
        if (crypto_ahash_final(req))
                goto clear_hash;

        tcp_put_md5sig_pool();
        return 0;

clear_hash:
        tcp_put_md5sig_pool();
clear_hash_noput:
        memset(md5_hash, 0, 16);
        return 1;
}

#endif

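/* Validate the TCP-MD5 option on an incoming segment against the key
 * configured for the peer address. Returns true when the segment must
 * be dropped: option present without a key, key present without the
 * option, or a signature mismatch.
 */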
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
                                    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
        const __u8 *hash_location = NULL;
        struct tcp_md5sig_key *hash_expected;
        const struct ipv6hdr *ip6h = ipv6_hdr(skb);
        const struct tcphdr *th = tcp_hdr(skb);
        int genhash;
        u8 newhash[16];

        hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
        hash_location = tcp_parse_md5sig_option(th);

        /* We've parsed the options - do we have a hash? */
        if (!hash_expected && !hash_location)
                return false;

        if (hash_expected && !hash_location) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
                return true;
        }

        if (!hash_expected && hash_location) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
                return true;
        }

        /* check the signature */
        genhash = tcp_v6_md5_hash_skb(newhash,
                                      hash_expected,
                                      NULL, skb);

        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
                net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
                                     genhash ? "failed" : "mismatch",
                                     &ip6h->saddr, ntohs(th->source),
                                     &ip6h->daddr, ntohs(th->dest));
                return true;
        }
#endif
        return false;
}

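/* Fill in the IPv6-specific parts of a freshly minted request sock:
 * the addresses from the SYN, the inbound interface for link-local
 * peers, and a reference to the SYN skb when the listener asked for
 * packet options to be reflected to userspace.
 */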
static void tcp_v6_init_req(struct request_sock *req,
                            const struct sock *sk_listener,
                            struct sk_buff *skb)
{
        struct inet_request_sock *ireq = inet_rsk(req);
        const struct ipv6_pinfo *np = inet6_sk(sk_listener);

        ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
        ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

        /* So that link locals have meaning */
        if (!sk_listener->sk_bound_dev_if &&
            ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
                ireq->ir_iif = tcp_v6_iif(skb);

        if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
            (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
             np->rxopt.bits.rxinfo ||
             np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
             np->rxopt.bits.rxohlim || np->repflow)) {
                atomic_inc(&skb->users);
                ireq->pktopts = skb;
        }
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
                                          struct flowi *fl,
                                          const struct request_sock *req,
                                          bool *strict)
{
        if (strict)
                *strict = true;
        return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
        .family         =       AF_INET6,
        .obj_size       =       sizeof(struct tcp6_request_sock),
        .rtx_syn_ack    =       tcp_rtx_synack,
        .send_ack       =       tcp_v6_reqsk_send_ack,
        .destructor     =       tcp_v6_reqsk_destructor,
        .send_reset     =       tcp_v6_send_reset,
        .syn_ack_timeout =      tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
        .mss_clamp      =       IPV6_MIN_MTU - sizeof(struct tcphdr) -
                                sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
        .req_md5_lookup =       tcp_v6_md5_lookup,
        .calc_md5_hash  =       tcp_v6_md5_hash_skb,
#endif
        .init_req       =       tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
        .cookie_init_seq =      cookie_v6_init_sequence,
#endif
        .route_req      =       tcp_v6_route_req,
        .init_seq       =       tcp_v6_init_sequence,
        .send_synack    =       tcp_v6_send_synack,
};

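/* Build and send a stateless control segment (RST, or a bare ACK when
 * rst == 0) on the per-netns control socket, mirroring the addresses
 * and ports of the packet we are answering; timestamps and an MD5
 * signature are appended as options when requested.
 */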
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
                                 u32 ack, u32 win, u32 tsval, u32 tsecr,
                                 int oif, struct tcp_md5sig_key *key, int rst,
                                 u8 tclass, __be32 label)
{
        const struct tcphdr *th = tcp_hdr(skb);
        struct tcphdr *t1;
        struct sk_buff *buff;
        struct flowi6 fl6;
        struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
        struct sock *ctl_sk = net->ipv6.tcp_sk;
        unsigned int tot_len = sizeof(struct tcphdr);
        struct dst_entry *dst;
        __be32 *topt;

        if (tsecr)
                tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
        if (key)
                tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

        buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
                         GFP_ATOMIC);
        if (!buff)
                return;

        skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

        t1 = (struct tcphdr *) skb_push(buff, tot_len);
        skb_reset_transport_header(buff);

        /* Swap the send and the receive. */
        memset(t1, 0, sizeof(*t1));
        t1->dest = th->source;
        t1->source = th->dest;
        t1->doff = tot_len / 4;
        t1->seq = htonl(seq);
        t1->ack_seq = htonl(ack);
        t1->ack = !rst || !th->ack;
        t1->rst = rst;
        t1->window = htons(win);

        topt = (__be32 *)(t1 + 1);

        if (tsecr) {
                *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
                *topt++ = htonl(tsval);
                *topt++ = htonl(tsecr);
        }

#ifdef CONFIG_TCP_MD5SIG
        if (key) {
                *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
                tcp_v6_md5_hash_hdr((__u8 *)topt, key,
                                    &ipv6_hdr(skb)->saddr,
                                    &ipv6_hdr(skb)->daddr, t1);
        }
#endif

        memset(&fl6, 0, sizeof(fl6));
        fl6.daddr = ipv6_hdr(skb)->saddr;
        fl6.saddr = ipv6_hdr(skb)->daddr;
        fl6.flowlabel = label;

        buff->ip_summed = CHECKSUM_PARTIAL;
        buff->csum = 0;

        __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

        fl6.flowi6_proto = IPPROTO_TCP;
        if (rt6_need_strict(&fl6.daddr) && !oif)
                fl6.flowi6_oif = tcp_v6_iif(skb);
        else {
                if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
                        oif = skb->skb_iif;

                fl6.flowi6_oif = oif;
        }

        fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
        fl6.fl6_dport = t1->dest;
        fl6.fl6_sport = t1->source;
        fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

        /* Pass the control socket to ip6_dst_lookup_flow even for a RST;
         * the underlying function uses it to retrieve the network
         * namespace.
         */
        dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
        if (!IS_ERR(dst)) {
                skb_dst_set(buff, dst);
                ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
                TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
                if (rst)
                        TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
                return;
        }

        kfree_skb(buff);
}

static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
        const struct tcphdr *th = tcp_hdr(skb);
        u32 seq = 0, ack_seq = 0;
        struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
        const __u8 *hash_location = NULL;
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        unsigned char newhash[16];
        int genhash;
        struct sock *sk1 = NULL;
#endif
        int oif;

        if (th->rst)
                return;

        /* If sk is not NULL, it means we did a successful lookup and the
         * incoming route had to be correct. prequeue might have dropped our dst.
         */
        if (!sk && !ipv6_unicast_destination(skb))
                return;

#ifdef CONFIG_TCP_MD5SIG
        rcu_read_lock();
        hash_location = tcp_parse_md5sig_option(th);
        if (sk && sk_fullsock(sk)) {
                key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
        } else if (hash_location) {
                /*
                 * The active side is gone. Try to find the listening socket
                 * via the source port, then look up the MD5 key through it.
                 * We do not lose any security this way: the incoming packet
                 * is verified against the MD5 hash of the key we find, and
                 * no RST is generated if the hash does not match.
                 */
                sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
                                           &tcp_hashinfo, NULL, 0,
                                           &ipv6h->saddr,
                                           th->source, &ipv6h->daddr,
                                           ntohs(th->source), tcp_v6_iif(skb));
                if (!sk1)
                        goto out;

                key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
                if (!key)
                        goto out;

                genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
                if (genhash || memcmp(hash_location, newhash, 16) != 0)
                        goto out;
        }
#endif

        if (th->ack)
                seq = ntohl(th->ack_seq);
        else
                ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
                          (th->doff << 2);

        oif = sk ? sk->sk_bound_dev_if : 0;
        tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
        rcu_read_unlock();
#endif
}

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
                            u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key, u8 tclass,
                            __be32 label)
{
        tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
                             tclass, label);
}

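/* Answer a segment that hit a TIME_WAIT socket with an ACK carrying the
 * remembered sequence state, then drop the timewait reference.
 */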
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
        struct inet_timewait_sock *tw = inet_twsk(sk);
        struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

        tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcp_time_stamp + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
                        tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));

        inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
{
        /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
         * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
         */
        /* RFC 7323 2.3
         * The window field (SEG.WND) of every outgoing segment, with the
         * exception of <SYN> segments, MUST be right-shifted by
         * Rcv.Wind.Shift bits:
         */
        tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
                        tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
                        tcp_rsk(req)->rcv_nxt,
                        req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
                        tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
                        tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
                        0, 0);
}

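/* On a listener, an incoming ACK may carry a SYN cookie instead of
 * matching a request sock; with CONFIG_SYN_COOKIES this lets
 * cookie_v6_check() validate it and recreate the half-open state.
 */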
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
        const struct tcphdr *th = tcp_hdr(skb);

        if (!th->syn)
                sk = cookie_v6_check(sk, skb);
#endif
        return sk;
}

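/* Listener entry point for an incoming SYN. v4-mapped traffic is handed
 * to the IPv4 code; non-unicast destinations are dropped, everything
 * else goes through the generic tcp_conn_request() with our AF ops.
 */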
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                goto drop;

        return tcp_conn_request(&tcp6_request_sock_ops,
                                &tcp_request_sock_ipv6_ops, sk, skb);

drop:
        tcp_listendrop(sk);
        return 0; /* don't send reset */
}

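/* Create the child socket once the 3-way handshake completes. The
 * v4-mapped branch delegates to tcp_v4_syn_recv_sock() and then remaps
 * the af_ops; the native branch routes, clones the listener's IPv6
 * state and tx options, and inherits any MD5 key for the peer.
 */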
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                         struct request_sock *req,
                                         struct dst_entry *dst,
                                         struct request_sock *req_unhash,
                                         bool *own_req)
{
        struct inet_request_sock *ireq;
        struct ipv6_pinfo *newnp;
        const struct ipv6_pinfo *np = inet6_sk(sk);
        struct ipv6_txoptions *opt;
        struct tcp6_sock *newtcp6sk;
        struct inet_sock *newinet;
        struct tcp_sock *newtp;
        struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key;
#endif
        struct flowi6 fl6;

        if (skb->protocol == htons(ETH_P_IP)) {
                /*
                 *      v6 mapped
                 */

                newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
                                             req_unhash, own_req);

                if (!newsk)
                        return NULL;

                newtcp6sk = (struct tcp6_sock *)newsk;
                inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

                newinet = inet_sk(newsk);
                newnp = inet6_sk(newsk);
                newtp = tcp_sk(newsk);

                memcpy(newnp, np, sizeof(struct ipv6_pinfo));

                newnp->saddr = newsk->sk_v6_rcv_saddr;

                inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
                newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
                newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

                newnp->ipv6_ac_list = NULL;
                newnp->ipv6_fl_list = NULL;
                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
                newnp->mcast_oif   = tcp_v6_iif(skb);
                newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
                newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
                if (np->repflow)
                        newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
                 * here, tcp_create_openreq_child now does this for us, see the comment in
                 * that function for the gory details. -acme
                 */

                /* This is a tricky place. Until this moment the IPv4 code
                 * worked with the IPv6 icsk.icsk_af_ops.
                 * Sync it now.
                 */
                tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

                return newsk;
        }

        ireq = inet_rsk(req);

        if (sk_acceptq_is_full(sk))
                goto out_overflow;

        if (!dst) {
                dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
                if (!dst)
                        goto out;
        }

        newsk = tcp_create_openreq_child(sk, req, skb);
        if (!newsk)
                goto out_nonewsk;

        /*
         * No need to charge this sock to the relevant IPv6 refcnt debug socks
         * count here, tcp_create_openreq_child now does this for us, see the
         * comment in that function for the gory details. -acme
         */

        newsk->sk_gso_type = SKB_GSO_TCPV6;
        ip6_dst_store(newsk, dst, NULL, NULL);
        inet6_sk_rx_dst_set(newsk, skb);

        newtcp6sk = (struct tcp6_sock *)newsk;
        inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

        newtp = tcp_sk(newsk);
        newinet = inet_sk(newsk);
        newnp = inet6_sk(newsk);

        memcpy(newnp, np, sizeof(struct ipv6_pinfo));

        newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
        newnp->saddr = ireq->ir_v6_loc_addr;
        newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
        newsk->sk_bound_dev_if = ireq->ir_iif;

        /* Now the IPv6 options...
         *
         * First: no IPv4 options.
         */
        newinet->inet_opt = NULL;
        newnp->ipv6_ac_list = NULL;
        newnp->ipv6_fl_list = NULL;

        /* Clone RX bits */
        newnp->rxopt.all = np->rxopt.all;

        newnp->pktoptions = NULL;
        newnp->opt        = NULL;
        newnp->mcast_oif  = tcp_v6_iif(skb);
        newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
        newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
        if (np->repflow)
                newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

        /* Clone native IPv6 options from the listening socket (if any).
         *
         * Yes, keeping a reference count would be much more clever, but we
         * do one more thing here: reattach optmem to newsk.
         */
        opt = ireq->ipv6_opt;
        if (!opt)
                opt = rcu_dereference(np->opt);
        if (opt) {
                opt = ipv6_dup_options(newsk, opt);
                RCU_INIT_POINTER(newnp->opt, opt);
        }
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (opt)
                inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
                                                    opt->opt_flen;

        tcp_ca_openreq_child(newsk, dst);

        tcp_sync_mss(newsk, dst_mtu(dst));
        newtp->advmss = dst_metric_advmss(dst);
        if (tcp_sk(sk)->rx_opt.user_mss &&
            tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
                newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

        tcp_initialize_rcv_mss(newsk);

        newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
        newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
        /* Copy over the MD5 key from the original socket */
        key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
        if (key) {
                /* We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
                 * memory, then we end up not copying the key
                 * across. Shucks.
                 */
                tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
                               AF_INET6, key->key, key->keylen,
                               sk_gfp_mask(sk, GFP_ATOMIC));
        }
#endif

        if (__inet_inherit_port(sk, newsk) < 0) {
                inet_csk_prepare_forced_close(newsk);
                tcp_done(newsk);
                goto out;
        }
        *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
        if (*own_req) {
                tcp_move_syn(newtp, req);

                /* Clone pktoptions received with SYN, if we own the req */
                if (ireq->pktopts) {
                        newnp->pktoptions = skb_clone(ireq->pktopts,
                                                      sk_gfp_mask(sk, GFP_ATOMIC));
                        consume_skb(ireq->pktopts);
                        ireq->pktopts = NULL;
                        if (newnp->pktoptions)
                                skb_set_owner_r(newnp->pktoptions, newsk);
                }
        }

        return newsk;

out_overflow:
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
        dst_release(dst);
out:
        tcp_listendrop(sk);
        return NULL;
}

static void tcp_v6_restore_cb(struct sk_buff *skb)
{
        /* We need to move the header back to the beginning if
         * xfrm6_policy_check() and tcp_v6_fill_cb() are going to be
         * called again. ip6_datagram_recv_specific_ctl() also expects
         * IP6CB to be there.
         */
        memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
                sizeof(struct inet6_skb_parm));
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp;
        struct sk_buff *opt_skb = NULL;

        /* Imagine: a socket is IPv6. An IPv4 packet arrives, goes to the
         * IPv4 receive handler and is backlogged. From the backlog it
         * always comes here. Kerboom...
         * Fortunately, tcp_rcv_established and rcv_established
         * handle them correctly, but that is not the case with
         * tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
         */

        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_do_rcv(sk, skb);

        if (tcp_filter(sk, skb))
                goto discard;

        /*
         *      socket locking is here for SMP purposes as backlog rcv
         *      is currently called with bh processing disabled.
         */

        /* Do Stevens' IPV6_PKTOPTIONS.
         *
         * Yes, guys, it is the only place in our code where we
         * may make it not affecting IPv4.
         * The rest of the code is protocol independent,
         * and I do not like the idea of uglifying IPv4.
         *
         * Actually, the whole idea behind IPV6_PKTOPTIONS
         * looks not very well thought out. For now we latch
         * the options received in the last packet enqueued
         * by tcp. Feel free to propose a better solution.
         *                                      --ANK (980728)
         */
        if (np->rxopt.all)
                opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                struct dst_entry *dst = sk->sk_rx_dst;

                sock_rps_save_rxhash(sk, skb);
                sk_mark_napi_id(sk, skb);
                if (dst) {
                        if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
                            dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
                                dst_release(dst);
                                sk->sk_rx_dst = NULL;
                        }
                }

                tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
                if (opt_skb)
                        goto ipv6_pktoptions;
                return 0;
        }

        if (tcp_checksum_complete(skb))
                goto csum_err;

        if (sk->sk_state == TCP_LISTEN) {
                struct sock *nsk = tcp_v6_cookie_check(sk, skb);

                if (!nsk)
                        goto discard;

                if (nsk != sk) {
                        sock_rps_save_rxhash(nsk, skb);
                        sk_mark_napi_id(nsk, skb);
                        if (tcp_child_process(sk, nsk, skb))
                                goto reset;
                        if (opt_skb)
                                __kfree_skb(opt_skb);
                        return 0;
                }
        } else
                sock_rps_save_rxhash(sk, skb);

        if (tcp_rcv_state_process(sk, skb))
                goto reset;
        if (opt_skb)
                goto ipv6_pktoptions;
        return 0;

reset:
        tcp_v6_send_reset(sk, skb);
discard:
        if (opt_skb)
                __kfree_skb(opt_skb);
        kfree_skb(skb);
        return 0;
csum_err:
        TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
        TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
        goto discard;

ipv6_pktoptions:
        /* You ask, what is this?
         *
         * 1. skb was enqueued by tcp.
         * 2. skb is added to the tail of the read queue, not out of order.
         * 3. The socket is not in a passive state.
         * 4. Finally, it really contains options the user wants to receive.
         */
        tp = tcp_sk(sk);
        if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
            !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
                if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
                        np->mcast_oif = tcp_v6_iif(opt_skb);
                if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
                        np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
                if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
                        np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
                if (np->repflow)
                        np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
                if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
                        skb_set_owner_r(opt_skb, sk);
                        tcp_v6_restore_cb(opt_skb);
                        opt_skb = xchg(&np->pktoptions, opt_skb);
                } else {
                        __kfree_skb(opt_skb);
                        opt_skb = xchg(&np->pktoptions, NULL);
                }
        }

        kfree_skb(opt_skb);
        return 0;
}

static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
                           const struct tcphdr *th)
{
        /* This is tricky: we move IP6CB to its correct location in
         * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
         * _decode_session6() uses IP6CB().
         * barrier() makes sure the compiler won't play aliasing games.
         */
        memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
                sizeof(struct inet6_skb_parm));
        barrier();

        TCP_SKB_CB(skb)->seq = ntohl(th->seq);
        TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
                                    skb->len - th->doff * 4);
        TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
        TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
        TCP_SKB_CB(skb)->tcp_tw_isn = 0;
        TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
        TCP_SKB_CB(skb)->sacked = 0;
}

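/* Main receive entry point, called for every TCP segment that IPv6
 * hands us: validate the header and checksum, look up the owning
 * socket, and dispatch according to its state (NEW_SYN_RECV and
 * TIME_WAIT mini-sockets get their own handling below).
 */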
1372 static int tcp_v6_rcv(struct sk_buff *skb)
1373 {
1374         const struct tcphdr *th;
1375         const struct ipv6hdr *hdr;
1376         bool refcounted;
1377         struct sock *sk;
1378         int ret;
1379         struct net *net = dev_net(skb->dev);
1380
1381         if (skb->pkt_type != PACKET_HOST)
1382                 goto discard_it;
1383
1384         /*
1385          *      Count it even if it's bad.
1386          */
1387         __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1388
1389         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1390                 goto discard_it;
1391
1392         th = (const struct tcphdr *)skb->data;
1393
1394         if (unlikely(th->doff < sizeof(struct tcphdr)/4))
1395                 goto bad_packet;
1396         if (!pskb_may_pull(skb, th->doff*4))
1397                 goto discard_it;
1398
1399         if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
1400                 goto csum_error;
1401
1402         th = (const struct tcphdr *)skb->data;
1403         hdr = ipv6_hdr(skb);
1404
1405 lookup:
1406         sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
1407                                 th->source, th->dest, inet6_iif(skb),
1408                                 &refcounted);
1409         if (!sk)
1410                 goto no_tcp_socket;
1411
1412 process:
1413         if (sk->sk_state == TCP_TIME_WAIT)
1414                 goto do_time_wait;
1415
1416         if (sk->sk_state == TCP_NEW_SYN_RECV) {
1417                 struct request_sock *req = inet_reqsk(sk);
1418                 struct sock *nsk;
1419
1420                 sk = req->rsk_listener;
1421                 tcp_v6_fill_cb(skb, hdr, th);
1422                 if (tcp_v6_inbound_md5_hash(sk, skb)) {
1423                         sk_drops_add(sk, skb);
1424                         reqsk_put(req);
1425                         goto discard_it;
1426                 }
1427                 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1428                         inet_csk_reqsk_queue_drop_and_put(sk, req);
1429                         goto lookup;
1430                 }
1431                 sock_hold(sk);
1432                 refcounted = true;
1433                 nsk = tcp_check_req(sk, skb, req, false);
1434                 if (!nsk) {
1435                         reqsk_put(req);
1436                         goto discard_and_relse;
1437                 }
1438                 if (nsk == sk) {
1439                         reqsk_put(req);
1440                         tcp_v6_restore_cb(skb);
1441                 } else if (tcp_child_process(sk, nsk, skb)) {
1442                         tcp_v6_send_reset(nsk, skb);
1443                         goto discard_and_relse;
1444                 } else {
1445                         sock_put(sk);
1446                         return 0;
1447                 }
1448         }
1449         if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1450                 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1451                 goto discard_and_relse;
1452         }
1453
1454         if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1455                 goto discard_and_relse;
1456
1457         tcp_v6_fill_cb(skb, hdr, th);
1458
1459         if (tcp_v6_inbound_md5_hash(sk, skb))
1460                 goto discard_and_relse;
1461
1462         if (tcp_filter(sk, skb))
1463                 goto discard_and_relse;
1464         th = (const struct tcphdr *)skb->data;
1465         hdr = ipv6_hdr(skb);
1466
1467         skb->dev = NULL;
1468
1469         if (sk->sk_state == TCP_LISTEN) {
1470                 ret = tcp_v6_do_rcv(sk, skb);
1471                 goto put_and_return;
1472         }
1473
1474         sk_incoming_cpu_update(sk);
1475
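        /*
         * From here on the segment is handled under the socket spinlock.
         * If the socket is owned by a user context, queue the skb on the
         * backlog so release_sock() replays it later; otherwise process
         * it immediately, unless tcp_prequeue() handed it to a task
         * already sleeping in tcp_recvmsg().
         */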
1476         bh_lock_sock_nested(sk);
1477         tcp_segs_in(tcp_sk(sk), skb);
1478         ret = 0;
1479         if (!sock_owned_by_user(sk)) {
1480                 if (!tcp_prequeue(sk, skb))
1481                         ret = tcp_v6_do_rcv(sk, skb);
1482         } else if (tcp_add_backlog(sk, skb)) {
1483                 goto discard_and_relse;
1484         }
1485         bh_unlock_sock(sk);
1486
1487 put_and_return:
1488         if (refcounted)
1489                 sock_put(sk);
1490         return ret ? -1 : 0;
1491
1492 no_tcp_socket:
1493         if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1494                 goto discard_it;
1495
1496         tcp_v6_fill_cb(skb, hdr, th);
1497
1498         if (tcp_checksum_complete(skb)) {
1499 csum_error:
1500                 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1501 bad_packet:
1502                 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1503         } else {
1504                 tcp_v6_send_reset(NULL, skb);
1505         }
1506
1507 discard_it:
1508         kfree_skb(skb);
1509         return 0;
1510
1511 discard_and_relse:
1512         sk_drops_add(sk, skb);
1513         if (refcounted)
1514                 sock_put(sk);
1515         goto discard_it;
1516
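        /*
         * TIME_WAIT handling: tcp_timewait_state_process() decides whether
         * the segment reopens the connection (TCP_TW_SYN, in which case the
         * lookup is retried against the listeners), deserves an ACK, a RST,
         * or nothing at all.
         */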
1517 do_time_wait:
1518         if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1519                 inet_twsk_put(inet_twsk(sk));
1520                 goto discard_it;
1521         }
1522
1523         tcp_v6_fill_cb(skb, hdr, th);
1524
1525         if (tcp_checksum_complete(skb)) {
1526                 inet_twsk_put(inet_twsk(sk));
1527                 goto csum_error;
1528         }
1529
1530         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1531         case TCP_TW_SYN:
1532         {
1533                 struct sock *sk2;
1534
1535                 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1536                                             skb, __tcp_hdrlen(th),
1537                                             &ipv6_hdr(skb)->saddr, th->source,
1538                                             &ipv6_hdr(skb)->daddr,
1539                                             ntohs(th->dest), tcp_v6_iif(skb));
1540                 if (sk2) {
1541                         struct inet_timewait_sock *tw = inet_twsk(sk);
1542                         inet_twsk_deschedule_put(tw);
1543                         sk = sk2;
1544                         tcp_v6_restore_cb(skb);
1545                         refcounted = false;
1546                         goto process;
1547                 }
1548                 /* Fall through to ACK */
1549         }
1550         case TCP_TW_ACK:
1551                 tcp_v6_timewait_ack(sk, skb);
1552                 break;
1553         case TCP_TW_RST:
1554                 tcp_v6_restore_cb(skb);
1555                 tcp_v6_send_reset(sk, skb);
1556                 inet_twsk_deschedule_put(inet_twsk(sk));
1557                 goto discard_it;
1558         case TCP_TW_SUCCESS:
1559                 ;
1560         }
1561         goto discard_it;
1562 }
1563
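/*
 * Early demux: called from the IPv6 input path before routing.  If the
 * segment matches an established socket, attach the socket and, when it
 * is still valid, the socket's cached receive route to the skb, letting
 * the normal input path skip both the socket and the route lookup.
 */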
1564 static void tcp_v6_early_demux(struct sk_buff *skb)
1565 {
1566         const struct ipv6hdr *hdr;
1567         const struct tcphdr *th;
1568         struct sock *sk;
1569
1570         if (skb->pkt_type != PACKET_HOST)
1571                 return;
1572
1573         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1574                 return;
1575
1576         hdr = ipv6_hdr(skb);
1577         th = tcp_hdr(skb);
1578
1579         if (th->doff < sizeof(struct tcphdr) / 4)
1580                 return;
1581
1582         /* Note: inet6_iif() here, not tcp_v6_iif(): tcp_v6_fill_cb() has not run yet */
1583         sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1584                                         &hdr->saddr, th->source,
1585                                         &hdr->daddr, ntohs(th->dest),
1586                                         inet6_iif(skb));
1587         if (sk) {
1588                 skb->sk = sk;
1589                 skb->destructor = sock_edemux;
1590                 if (sk_fullsock(sk)) {
1591                         struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1592
1593                         if (dst)
1594                                 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1595                         if (dst &&
1596                             inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1597                                 skb_dst_set_noref(skb, dst);
1598                 }
1599         }
1600 }
1601
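/* TIME_WAIT callbacks are shared with IPv4; only the object size differs. */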
1602 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1603         .twsk_obj_size  = sizeof(struct tcp6_timewait_sock),
1604         .twsk_unique    = tcp_twsk_unique,
1605         .twsk_destructor = tcp_twsk_destructor,
1606 };
1607
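/*
 * AF-specific hooks for native IPv6 TCP sockets; ipv6_mapped below swaps
 * in the IPv4 transmit paths for v4-mapped sockets.
 */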
1608 static const struct inet_connection_sock_af_ops ipv6_specific = {
1609         .queue_xmit        = inet6_csk_xmit,
1610         .send_check        = tcp_v6_send_check,
1611         .rebuild_header    = inet6_sk_rebuild_header,
1612         .sk_rx_dst_set     = inet6_sk_rx_dst_set,
1613         .conn_request      = tcp_v6_conn_request,
1614         .syn_recv_sock     = tcp_v6_syn_recv_sock,
1615         .net_header_len    = sizeof(struct ipv6hdr),
1616         .net_frag_header_len = sizeof(struct frag_hdr),
1617         .setsockopt        = ipv6_setsockopt,
1618         .getsockopt        = ipv6_getsockopt,
1619         .addr2sockaddr     = inet6_csk_addr2sockaddr,
1620         .sockaddr_len      = sizeof(struct sockaddr_in6),
1621         .bind_conflict     = inet6_csk_bind_conflict,
1622 #ifdef CONFIG_COMPAT
1623         .compat_setsockopt = compat_ipv6_setsockopt,
1624         .compat_getsockopt = compat_ipv6_getsockopt,
1625 #endif
1626         .mtu_reduced       = tcp_v6_mtu_reduced,
1627 };
1628
1629 #ifdef CONFIG_TCP_MD5SIG
1630 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1631         .md5_lookup     =       tcp_v6_md5_lookup,
1632         .calc_md5_hash  =       tcp_v6_md5_hash_skb,
1633         .md5_parse      =       tcp_v6_parse_md5_keys,
1634 };
1635 #endif
1636
1637 /*
1638  *      TCP over IPv4 via INET6 API
1639  */
1640 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1641         .queue_xmit        = ip_queue_xmit,
1642         .send_check        = tcp_v4_send_check,
1643         .rebuild_header    = inet_sk_rebuild_header,
1644         .sk_rx_dst_set     = inet_sk_rx_dst_set,
1645         .conn_request      = tcp_v6_conn_request,
1646         .syn_recv_sock     = tcp_v6_syn_recv_sock,
1647         .net_header_len    = sizeof(struct iphdr),
1648         .setsockopt        = ipv6_setsockopt,
1649         .getsockopt        = ipv6_getsockopt,
1650         .addr2sockaddr     = inet6_csk_addr2sockaddr,
1651         .sockaddr_len      = sizeof(struct sockaddr_in6),
1652         .bind_conflict     = inet6_csk_bind_conflict,
1653 #ifdef CONFIG_COMPAT
1654         .compat_setsockopt = compat_ipv6_setsockopt,
1655         .compat_getsockopt = compat_ipv6_getsockopt,
1656 #endif
1657         .mtu_reduced       = tcp_v4_mtu_reduced,
1658 };
1659
1660 #ifdef CONFIG_TCP_MD5SIG
1661 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1662         .md5_lookup     =       tcp_v4_md5_lookup,
1663         .calc_md5_hash  =       tcp_v4_md5_hash_skb,
1664         .md5_parse      =       tcp_v6_parse_md5_keys,
1665 };
1666 #endif
1667
1668 /* NOTE: A lot of fields are set to zero explicitly by the call to
1669  *       sk_alloc(), so they need not be initialized here.
1670  */
1671 static int tcp_v6_init_sock(struct sock *sk)
1672 {
1673         struct inet_connection_sock *icsk = inet_csk(sk);
1674
1675         tcp_init_sock(sk);
1676
1677         icsk->icsk_af_ops = &ipv6_specific;
1678
1679 #ifdef CONFIG_TCP_MD5SIG
1680         tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1681 #endif
1682
1683         return 0;
1684 }
1685
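/* Generic TCP teardown first (tcp_v4_destroy_sock() is AF-agnostic
 * despite its name), then the IPv6-specific socket state.
 */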
1686 static void tcp_v6_destroy_sock(struct sock *sk)
1687 {
1688         tcp_v4_destroy_sock(sk);
1689         inet6_destroy_sock(sk);
1690 }
1691
1692 #ifdef CONFIG_PROC_FS
1693 /* Proc filesystem TCPv6 sock list dumping. */
1694 static void get_openreq6(struct seq_file *seq,
1695                          const struct request_sock *req, int i)
1696 {
1697         long ttd = req->rsk_timer.expires - jiffies;
1698         const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1699         const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
1700
1701         if (ttd < 0)
1702                 ttd = 0;
1703
1704         seq_printf(seq,
1705                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1706                    "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
1707                    i,
1708                    src->s6_addr32[0], src->s6_addr32[1],
1709                    src->s6_addr32[2], src->s6_addr32[3],
1710                    inet_rsk(req)->ir_num,
1711                    dest->s6_addr32[0], dest->s6_addr32[1],
1712                    dest->s6_addr32[2], dest->s6_addr32[3],
1713                    ntohs(inet_rsk(req)->ir_rmt_port),
1714                    TCP_SYN_RECV,
1715                    0, 0, /* could print option size, but that is af dependent. */
1716                    1,   /* timers active (only the expire timer) */
1717                    jiffies_to_clock_t(ttd),
1718                    req->num_timeout,
1719                    from_kuid_munged(seq_user_ns(seq),
1720                                     sock_i_uid(req->rsk_listener)),
1721                    0,  /* non-standard timer */
1722                    0, /* open_requests have no inode */
1723                    0, req);
1724 }
1725
1726 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1727 {
1728         const struct in6_addr *dest, *src;
1729         __u16 destp, srcp;
1730         int timer_active;
1731         unsigned long timer_expires;
1732         const struct inet_sock *inet = inet_sk(sp);
1733         const struct tcp_sock *tp = tcp_sk(sp);
1734         const struct inet_connection_sock *icsk = inet_csk(sp);
1735         const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
1736         int rx_queue;
1737         int state;
1738
1739         dest  = &sp->sk_v6_daddr;
1740         src   = &sp->sk_v6_rcv_saddr;
1741         destp = ntohs(inet->inet_dport);
1742         srcp  = ntohs(inet->inet_sport);
1743
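        /*
         * Timer encoding used by the "tr" column of /proc/net/tcp6:
         * 1 = retransmit/loss-probe timer, 2 = keepalive (sk_timer),
         * 4 = zero window probe timer, 0 = none pending (3 marks
         * TIME_WAIT sockets and is printed by get_timewait6_sock()).
         */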
1744         if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
1745             icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
1746             icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
1747                 timer_active    = 1;
1748                 timer_expires   = icsk->icsk_timeout;
1749         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1750                 timer_active    = 4;
1751                 timer_expires   = icsk->icsk_timeout;
1752         } else if (timer_pending(&sp->sk_timer)) {
1753                 timer_active    = 2;
1754                 timer_expires   = sp->sk_timer.expires;
1755         } else {
1756                 timer_active    = 0;
1757                 timer_expires = jiffies;
1758         }
1759
1760         state = sk_state_load(sp);
1761         if (state == TCP_LISTEN)
1762                 rx_queue = sp->sk_ack_backlog;
1763         else
1764                 /* Because we don't lock the socket,
1765                  * we might find a transient negative value.
1766                  */
1767                 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1768
1769         seq_printf(seq,
1770                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1771                    "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1772                    i,
1773                    src->s6_addr32[0], src->s6_addr32[1],
1774                    src->s6_addr32[2], src->s6_addr32[3], srcp,
1775                    dest->s6_addr32[0], dest->s6_addr32[1],
1776                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
1777                    state,
1778                    tp->write_seq - tp->snd_una,
1779                    rx_queue,
1780                    timer_active,
1781                    jiffies_delta_to_clock_t(timer_expires - jiffies),
1782                    icsk->icsk_retransmits,
1783                    from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1784                    icsk->icsk_probes_out,
1785                    sock_i_ino(sp),
1786                    atomic_read(&sp->sk_refcnt), sp,
1787                    jiffies_to_clock_t(icsk->icsk_rto),
1788                    jiffies_to_clock_t(icsk->icsk_ack.ato),
1789                    (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1790                    tp->snd_cwnd,
1791                    state == TCP_LISTEN ?
1792                         fastopenq->max_qlen :
1793                         (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1794                    );
1795 }
1796
1797 static void get_timewait6_sock(struct seq_file *seq,
1798                                struct inet_timewait_sock *tw, int i)
1799 {
1800         long delta = tw->tw_timer.expires - jiffies;
1801         const struct in6_addr *dest, *src;
1802         __u16 destp, srcp;
1803
1804         dest = &tw->tw_v6_daddr;
1805         src  = &tw->tw_v6_rcv_saddr;
1806         destp = ntohs(tw->tw_dport);
1807         srcp  = ntohs(tw->tw_sport);
1808
1809         seq_printf(seq,
1810                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1811                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
1812                    i,
1813                    src->s6_addr32[0], src->s6_addr32[1],
1814                    src->s6_addr32[2], src->s6_addr32[3], srcp,
1815                    dest->s6_addr32[0], dest->s6_addr32[1],
1816                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
1817                    tw->tw_substate, 0, 0,
1818                    3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
1819                    atomic_read(&tw->tw_refcnt), tw);
1820 }
1821
1822 static int tcp6_seq_show(struct seq_file *seq, void *v)
1823 {
1824         struct tcp_iter_state *st;
1825         struct sock *sk = v;
1826
1827         if (v == SEQ_START_TOKEN) {
1828                 seq_puts(seq,
1829                          "  sl  "
1830                          "local_address                         "
1831                          "remote_address                        "
1832                          "st tx_queue rx_queue tr tm->when retrnsmt"
1833                          "   uid  timeout inode\n");
1834                 goto out;
1835         }
1836         st = seq->private;
1837
1838         if (sk->sk_state == TCP_TIME_WAIT)
1839                 get_timewait6_sock(seq, v, st->num);
1840         else if (sk->sk_state == TCP_NEW_SYN_RECV)
1841                 get_openreq6(seq, v, st->num);
1842         else
1843                 get_tcp6_sock(seq, v, st->num);
1844 out:
1845         return 0;
1846 }
1847
1848 static const struct file_operations tcp6_afinfo_seq_fops = {
1849         .owner   = THIS_MODULE,
1850         .open    = tcp_seq_open,
1851         .read    = seq_read,
1852         .llseek  = seq_lseek,
1853         .release = seq_release_net
1854 };
1855
1856 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
1857         .name           = "tcp6",
1858         .family         = AF_INET6,
1859         .seq_fops       = &tcp6_afinfo_seq_fops,
1860         .seq_ops        = {
1861                 .show           = tcp6_seq_show,
1862         },
1863 };
1864
1865 int __net_init tcp6_proc_init(struct net *net)
1866 {
1867         return tcp_proc_register(net, &tcp6_seq_afinfo);
1868 }
1869
1870 void tcp6_proc_exit(struct net *net)
1871 {
1872         tcp_proc_unregister(net, &tcp6_seq_afinfo);
1873 }
1874 #endif
1875
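/*
 * tcpv6_prot wires the generic TCP implementation into the AF_INET6
 * socket layer; only a handful of entries (connect, init/destroy,
 * backlog_rcv, hash and the object sizes) are IPv6-specific, the rest
 * is shared with IPv4.
 */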
1876 struct proto tcpv6_prot = {
1877         .name                   = "TCPv6",
1878         .owner                  = THIS_MODULE,
1879         .close                  = tcp_close,
1880         .connect                = tcp_v6_connect,
1881         .disconnect             = tcp_disconnect,
1882         .accept                 = inet_csk_accept,
1883         .ioctl                  = tcp_ioctl,
1884         .init                   = tcp_v6_init_sock,
1885         .destroy                = tcp_v6_destroy_sock,
1886         .shutdown               = tcp_shutdown,
1887         .setsockopt             = tcp_setsockopt,
1888         .getsockopt             = tcp_getsockopt,
1889         .recvmsg                = tcp_recvmsg,
1890         .sendmsg                = tcp_sendmsg,
1891         .sendpage               = tcp_sendpage,
1892         .backlog_rcv            = tcp_v6_do_rcv,
1893         .release_cb             = tcp_release_cb,
1894         .hash                   = inet6_hash,
1895         .unhash                 = inet_unhash,
1896         .get_port               = inet_csk_get_port,
1897         .enter_memory_pressure  = tcp_enter_memory_pressure,
1898         .stream_memory_free     = tcp_stream_memory_free,
1899         .sockets_allocated      = &tcp_sockets_allocated,
1900         .memory_allocated       = &tcp_memory_allocated,
1901         .memory_pressure        = &tcp_memory_pressure,
1902         .orphan_count           = &tcp_orphan_count,
1903         .sysctl_mem             = sysctl_tcp_mem,
1904         .sysctl_wmem            = sysctl_tcp_wmem,
1905         .sysctl_rmem            = sysctl_tcp_rmem,
1906         .max_header             = MAX_TCP_HEADER,
1907         .obj_size               = sizeof(struct tcp6_sock),
1908         .slab_flags             = SLAB_DESTROY_BY_RCU,
1909         .twsk_prot              = &tcp6_timewait_sock_ops,
1910         .rsk_prot               = &tcp6_request_sock_ops,
1911         .h.hashinfo             = &tcp_hashinfo,
1912         .no_autobind            = true,
1913 #ifdef CONFIG_COMPAT
1914         .compat_setsockopt      = compat_tcp_setsockopt,
1915         .compat_getsockopt      = compat_tcp_getsockopt,
1916 #endif
1917         .diag_destroy           = tcp_abort,
1918 };
1919
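/*
 * INET6_PROTO_NOPOLICY: tcp_v6_rcv() performs its own xfrm policy
 * checks, so the generic IPv6 input path may skip them.
 * INET6_PROTO_FINAL: TCP is a final, upper-layer protocol; no further
 * extension headers are parsed after it.
 */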
1920 static const struct inet6_protocol tcpv6_protocol = {
1921         .early_demux    =       tcp_v6_early_demux,
1922         .handler        =       tcp_v6_rcv,
1923         .err_handler    =       tcp_v6_err,
1924         .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1925 };
1926
1927 static struct inet_protosw tcpv6_protosw = {
1928         .type           =       SOCK_STREAM,
1929         .protocol       =       IPPROTO_TCP,
1930         .prot           =       &tcpv6_prot,
1931         .ops            =       &inet6_stream_ops,
1932         .flags          =       INET_PROTOSW_PERMANENT |
1933                                 INET_PROTOSW_ICSK,
1934 };
1935
1936 static int __net_init tcpv6_net_init(struct net *net)
1937 {
1938         return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1939                                     SOCK_RAW, IPPROTO_TCP, net);
1940 }
1941
1942 static void __net_exit tcpv6_net_exit(struct net *net)
1943 {
1944         inet_ctl_sock_destroy(net->ipv6.tcp_sk);
1945 }
1946
1947 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
1948 {
1949         inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
1950 }
1951
1952 static struct pernet_operations tcpv6_net_ops = {
1953         .init       = tcpv6_net_init,
1954         .exit       = tcpv6_net_exit,
1955         .exit_batch = tcpv6_net_exit_batch,
1956 };
1957
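/*
 * Registration order matters: install the protocol handler first, then
 * expose the SOCK_STREAM protosw to socket(2), then create the per-netns
 * control socket; errors unwind in the reverse order.
 */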
1958 int __init tcpv6_init(void)
1959 {
1960         int ret;
1961
1962         ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1963         if (ret)
1964                 goto out;
1965
1966         /* register the inet6 protocol switch (socket(2) entry point) */
1967         ret = inet6_register_protosw(&tcpv6_protosw);
1968         if (ret)
1969                 goto out_tcpv6_protocol;
1970
1971         ret = register_pernet_subsys(&tcpv6_net_ops);
1972         if (ret)
1973                 goto out_tcpv6_protosw;
1974 out:
1975         return ret;
1976
1977 out_tcpv6_protosw:
1978         inet6_unregister_protosw(&tcpv6_protosw);
1979 out_tcpv6_protocol:
1980         inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1981         goto out;
1982 }
1983
1984 void tcpv6_exit(void)
1985 {
1986         unregister_pernet_subsys(&tcpv6_net_ops);
1987         inet6_unregister_protosw(&tcpv6_protosw);
1988         inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1989 }