]> Git Repo - linux.git/blob - net/ipv4/tcp_ipv4.c
Merge tag 'tty-6.4-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty
[linux.git] / net / ipv4 / tcp_ipv4.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET         An implementation of the TCP/IP protocol suite for the LINUX
4  *              operating system.  INET is implemented using the  BSD Socket
5  *              interface as the means of communication with the user level.
6  *
7  *              Implementation of the Transmission Control Protocol(TCP).
8  *
9  *              IPv4 specific functions
10  *
11  *              code split from:
12  *              linux/ipv4/tcp.c
13  *              linux/ipv4/tcp_input.c
14  *              linux/ipv4/tcp_output.c
15  *
16  *              See tcp.c for author information
17  */
18
19 /*
20  * Changes:
21  *              David S. Miller :       New socket lookup architecture.
22  *                                      This code is dedicated to John Dyson.
23  *              David S. Miller :       Change semantics of established hash,
24  *                                      half is devoted to TIME_WAIT sockets
25  *                                      and the rest go in the other half.
26  *              Andi Kleen :            Add support for syncookies and fixed
27  *                                      some bugs: ip options weren't passed to
28  *                                      the TCP layer, missed a check for an
29  *                                      ACK bit.
30  *              Andi Kleen :            Implemented fast path mtu discovery.
31  *                                      Fixed many serious bugs in the
32  *                                      request_sock handling and moved
33  *                                      most of it into the af independent code.
34  *                                      Added tail drop and some other bugfixes.
35  *                                      Added new listen semantics.
36  *              Mike McLagan    :       Routing by source
37  *      Juan Jose Ciarlante:            ip_dynaddr bits
38  *              Andi Kleen:             various fixes.
39  *      Vitaly E. Lavrov        :       Transparent proxy revived after year
40  *                                      coma.
41  *      Andi Kleen              :       Fix new listen.
42  *      Andi Kleen              :       Fix accept error reporting.
43  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
44  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
45  *                                      a single port at the same time.
46  */
47
48 #define pr_fmt(fmt) "TCP: " fmt
49
50 #include <linux/bottom_half.h>
51 #include <linux/types.h>
52 #include <linux/fcntl.h>
53 #include <linux/module.h>
54 #include <linux/random.h>
55 #include <linux/cache.h>
56 #include <linux/jhash.h>
57 #include <linux/init.h>
58 #include <linux/times.h>
59 #include <linux/slab.h>
60
61 #include <net/net_namespace.h>
62 #include <net/icmp.h>
63 #include <net/inet_hashtables.h>
64 #include <net/tcp.h>
65 #include <net/transp_v6.h>
66 #include <net/ipv6.h>
67 #include <net/inet_common.h>
68 #include <net/timewait_sock.h>
69 #include <net/xfrm.h>
70 #include <net/secure_seq.h>
71 #include <net/busy_poll.h>
72
73 #include <linux/inet.h>
74 #include <linux/ipv6.h>
75 #include <linux/stddef.h>
76 #include <linux/proc_fs.h>
77 #include <linux/seq_file.h>
78 #include <linux/inetdevice.h>
79 #include <linux/btf_ids.h>
80
81 #include <crypto/hash.h>
82 #include <linux/scatterlist.h>
83
84 #include <trace/events/tcp.h>
85
86 #ifdef CONFIG_TCP_MD5SIG
87 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
88                                __be32 daddr, __be32 saddr, const struct tcphdr *th);
89 #endif
90
91 struct inet_hashinfo tcp_hashinfo;
92 EXPORT_SYMBOL(tcp_hashinfo);
93
94 static DEFINE_PER_CPU(struct sock *, ipv4_tcp_sk);
95
96 static u32 tcp_v4_init_seq(const struct sk_buff *skb)
97 {
98         return secure_tcp_seq(ip_hdr(skb)->daddr,
99                               ip_hdr(skb)->saddr,
100                               tcp_hdr(skb)->dest,
101                               tcp_hdr(skb)->source);
102 }
103
104 static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
105 {
106         return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
107 }
108
109 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
110 {
111         int reuse = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse);
112         const struct inet_timewait_sock *tw = inet_twsk(sktw);
113         const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
114         struct tcp_sock *tp = tcp_sk(sk);
115
116         if (reuse == 2) {
117                 /* Still does not detect *everything* that goes through
118                  * lo, since we require a loopback src or dst address
119                  * or direct binding to 'lo' interface.
120                  */
121                 bool loopback = false;
122                 if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
123                         loopback = true;
124 #if IS_ENABLED(CONFIG_IPV6)
125                 if (tw->tw_family == AF_INET6) {
126                         if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
127                             ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
128                             ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
129                             ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
130                                 loopback = true;
131                 } else
132 #endif
133                 {
134                         if (ipv4_is_loopback(tw->tw_daddr) ||
135                             ipv4_is_loopback(tw->tw_rcv_saddr))
136                                 loopback = true;
137                 }
138                 if (!loopback)
139                         reuse = 0;
140         }
141
142         /* With PAWS, it is safe from the viewpoint
143            of data integrity. Even without PAWS it is safe provided sequence
144            spaces do not overlap i.e. at data rates <= 80Mbit/sec.
145
146            Actually, the idea is close to VJ's one, only timestamp cache is
147            held not per host, but per port pair and TW bucket is used as state
148            holder.
149
150            If TW bucket has been already destroyed we fall back to VJ's scheme
151            and use initial timestamp retrieved from peer table.
152          */
153         if (tcptw->tw_ts_recent_stamp &&
154             (!twp || (reuse && time_after32(ktime_get_seconds(),
155                                             tcptw->tw_ts_recent_stamp)))) {
156                 /* In case of repair and re-using TIME-WAIT sockets we still
157                  * want to be sure that it is safe as above but honor the
158                  * sequence numbers and time stamps set as part of the repair
159                  * process.
160                  *
161                  * Without this check re-using a TIME-WAIT socket with TCP
162                  * repair would accumulate a -1 on the repair assigned
163                  * sequence number. The first time it is reused the sequence
164                  * is -1, the second time -2, etc. This fixes that issue
165                  * without appearing to create any others.
166                  */
167                 if (likely(!tp->repair)) {
168                         u32 seq = tcptw->tw_snd_nxt + 65535 + 2;
169
170                         if (!seq)
171                                 seq = 1;
172                         WRITE_ONCE(tp->write_seq, seq);
173                         tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
174                         tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
175                 }
176                 sock_hold(sktw);
177                 return 1;
178         }
179
180         return 0;
181 }
182 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
183
184 static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
185                               int addr_len)
186 {
187         /* This check is replicated from tcp_v4_connect() and intended to
188          * prevent BPF program called below from accessing bytes that are out
189          * of the bound specified by user in addr_len.
190          */
191         if (addr_len < sizeof(struct sockaddr_in))
192                 return -EINVAL;
193
194         sock_owned_by_me(sk);
195
196         return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
197 }
198
199 /* This will initiate an outgoing connection. */
200 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
201 {
202         struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
203         struct inet_timewait_death_row *tcp_death_row;
204         struct inet_sock *inet = inet_sk(sk);
205         struct tcp_sock *tp = tcp_sk(sk);
206         struct ip_options_rcu *inet_opt;
207         struct net *net = sock_net(sk);
208         __be16 orig_sport, orig_dport;
209         __be32 daddr, nexthop;
210         struct flowi4 *fl4;
211         struct rtable *rt;
212         int err;
213
214         if (addr_len < sizeof(struct sockaddr_in))
215                 return -EINVAL;
216
217         if (usin->sin_family != AF_INET)
218                 return -EAFNOSUPPORT;
219
220         nexthop = daddr = usin->sin_addr.s_addr;
221         inet_opt = rcu_dereference_protected(inet->inet_opt,
222                                              lockdep_sock_is_held(sk));
223         if (inet_opt && inet_opt->opt.srr) {
224                 if (!daddr)
225                         return -EINVAL;
226                 nexthop = inet_opt->opt.faddr;
227         }
228
229         orig_sport = inet->inet_sport;
230         orig_dport = usin->sin_port;
231         fl4 = &inet->cork.fl.u.ip4;
232         rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
233                               sk->sk_bound_dev_if, IPPROTO_TCP, orig_sport,
234                               orig_dport, sk);
235         if (IS_ERR(rt)) {
236                 err = PTR_ERR(rt);
237                 if (err == -ENETUNREACH)
238                         IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
239                 return err;
240         }
241
242         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
243                 ip_rt_put(rt);
244                 return -ENETUNREACH;
245         }
246
247         if (!inet_opt || !inet_opt->opt.srr)
248                 daddr = fl4->daddr;
249
250         tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
251
252         if (!inet->inet_saddr) {
253                 err = inet_bhash2_update_saddr(sk,  &fl4->saddr, AF_INET);
254                 if (err) {
255                         ip_rt_put(rt);
256                         return err;
257                 }
258         } else {
259                 sk_rcv_saddr_set(sk, inet->inet_saddr);
260         }
261
262         if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
263                 /* Reset inherited state */
264                 tp->rx_opt.ts_recent       = 0;
265                 tp->rx_opt.ts_recent_stamp = 0;
266                 if (likely(!tp->repair))
267                         WRITE_ONCE(tp->write_seq, 0);
268         }
269
270         inet->inet_dport = usin->sin_port;
271         sk_daddr_set(sk, daddr);
272
273         inet_csk(sk)->icsk_ext_hdr_len = 0;
274         if (inet_opt)
275                 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
276
277         tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
278
279         /* Socket identity is still unknown (sport may be zero).
280          * However we set state to SYN-SENT and not releasing socket
281          * lock select source port, enter ourselves into the hash tables and
282          * complete initialization after this.
283          */
284         tcp_set_state(sk, TCP_SYN_SENT);
285         err = inet_hash_connect(tcp_death_row, sk);
286         if (err)
287                 goto failure;
288
289         sk_set_txhash(sk);
290
291         rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
292                                inet->inet_sport, inet->inet_dport, sk);
293         if (IS_ERR(rt)) {
294                 err = PTR_ERR(rt);
295                 rt = NULL;
296                 goto failure;
297         }
298         /* OK, now commit destination to socket.  */
299         sk->sk_gso_type = SKB_GSO_TCPV4;
300         sk_setup_caps(sk, &rt->dst);
301         rt = NULL;
302
303         if (likely(!tp->repair)) {
304                 if (!tp->write_seq)
305                         WRITE_ONCE(tp->write_seq,
306                                    secure_tcp_seq(inet->inet_saddr,
307                                                   inet->inet_daddr,
308                                                   inet->inet_sport,
309                                                   usin->sin_port));
310                 tp->tsoffset = secure_tcp_ts_off(net, inet->inet_saddr,
311                                                  inet->inet_daddr);
312         }
313
314         inet->inet_id = get_random_u16();
315
316         if (tcp_fastopen_defer_connect(sk, &err))
317                 return err;
318         if (err)
319                 goto failure;
320
321         err = tcp_connect(sk);
322
323         if (err)
324                 goto failure;
325
326         return 0;
327
328 failure:
329         /*
330          * This unhashes the socket and releases the local port,
331          * if necessary.
332          */
333         tcp_set_state(sk, TCP_CLOSE);
334         inet_bhash2_reset_saddr(sk);
335         ip_rt_put(rt);
336         sk->sk_route_caps = 0;
337         inet->inet_dport = 0;
338         return err;
339 }
340 EXPORT_SYMBOL(tcp_v4_connect);
341
342 /*
343  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
344  * It can be called through tcp_release_cb() if socket was owned by user
345  * at the time tcp_v4_err() was called to handle ICMP message.
346  */
347 void tcp_v4_mtu_reduced(struct sock *sk)
348 {
349         struct inet_sock *inet = inet_sk(sk);
350         struct dst_entry *dst;
351         u32 mtu;
352
353         if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
354                 return;
355         mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
356         dst = inet_csk_update_pmtu(sk, mtu);
357         if (!dst)
358                 return;
359
360         /* Something is about to be wrong... Remember soft error
361          * for the case, if this connection will not able to recover.
362          */
363         if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
364                 WRITE_ONCE(sk->sk_err_soft, EMSGSIZE);
365
366         mtu = dst_mtu(dst);
367
368         if (inet->pmtudisc != IP_PMTUDISC_DONT &&
369             ip_sk_accept_pmtu(sk) &&
370             inet_csk(sk)->icsk_pmtu_cookie > mtu) {
371                 tcp_sync_mss(sk, mtu);
372
373                 /* Resend the TCP packet because it's
374                  * clear that the old packet has been
375                  * dropped. This is the new "fast" path mtu
376                  * discovery.
377                  */
378                 tcp_simple_retransmit(sk);
379         } /* else let the usual retransmit timer handle it */
380 }
381 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
382
383 static void do_redirect(struct sk_buff *skb, struct sock *sk)
384 {
385         struct dst_entry *dst = __sk_dst_check(sk, 0);
386
387         if (dst)
388                 dst->ops->redirect(dst, sk, skb);
389 }
390
391
392 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
393 void tcp_req_err(struct sock *sk, u32 seq, bool abort)
394 {
395         struct request_sock *req = inet_reqsk(sk);
396         struct net *net = sock_net(sk);
397
398         /* ICMPs are not backlogged, hence we cannot get
399          * an established socket here.
400          */
401         if (seq != tcp_rsk(req)->snt_isn) {
402                 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
403         } else if (abort) {
404                 /*
405                  * Still in SYN_RECV, just remove it silently.
406                  * There is no good way to pass the error to the newly
407                  * created socket, and POSIX does not want network
408                  * errors returned from accept().
409                  */
410                 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
411                 tcp_listendrop(req->rsk_listener);
412         }
413         reqsk_put(req);
414 }
415 EXPORT_SYMBOL(tcp_req_err);
416
417 /* TCP-LD (RFC 6069) logic */
418 void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
419 {
420         struct inet_connection_sock *icsk = inet_csk(sk);
421         struct tcp_sock *tp = tcp_sk(sk);
422         struct sk_buff *skb;
423         s32 remaining;
424         u32 delta_us;
425
426         if (sock_owned_by_user(sk))
427                 return;
428
429         if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
430             !icsk->icsk_backoff)
431                 return;
432
433         skb = tcp_rtx_queue_head(sk);
434         if (WARN_ON_ONCE(!skb))
435                 return;
436
437         icsk->icsk_backoff--;
438         icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT;
439         icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
440
441         tcp_mstamp_refresh(tp);
442         delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
443         remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us);
444
445         if (remaining > 0) {
446                 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
447                                           remaining, TCP_RTO_MAX);
448         } else {
449                 /* RTO revert clocked out retransmission.
450                  * Will retransmit now.
451                  */
452                 tcp_retransmit_timer(sk);
453         }
454 }
455 EXPORT_SYMBOL(tcp_ld_RTO_revert);
456
457 /*
458  * This routine is called by the ICMP module when it gets some
459  * sort of error condition.  If err < 0 then the socket should
460  * be closed and the error returned to the user.  If err > 0
461  * it's just the icmp type << 8 | icmp code.  After adjustment
462  * header points to the first 8 bytes of the tcp header.  We need
463  * to find the appropriate port.
464  *
465  * The locking strategy used here is very "optimistic". When
466  * someone else accesses the socket the ICMP is just dropped
467  * and for some paths there is no check at all.
468  * A more general error queue to queue errors for later handling
469  * is probably better.
470  *
471  */
472
473 int tcp_v4_err(struct sk_buff *skb, u32 info)
474 {
475         const struct iphdr *iph = (const struct iphdr *)skb->data;
476         struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
477         struct tcp_sock *tp;
478         struct inet_sock *inet;
479         const int type = icmp_hdr(skb)->type;
480         const int code = icmp_hdr(skb)->code;
481         struct sock *sk;
482         struct request_sock *fastopen;
483         u32 seq, snd_una;
484         int err;
485         struct net *net = dev_net(skb->dev);
486
487         sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
488                                        iph->daddr, th->dest, iph->saddr,
489                                        ntohs(th->source), inet_iif(skb), 0);
490         if (!sk) {
491                 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
492                 return -ENOENT;
493         }
494         if (sk->sk_state == TCP_TIME_WAIT) {
495                 inet_twsk_put(inet_twsk(sk));
496                 return 0;
497         }
498         seq = ntohl(th->seq);
499         if (sk->sk_state == TCP_NEW_SYN_RECV) {
500                 tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
501                                      type == ICMP_TIME_EXCEEDED ||
502                                      (type == ICMP_DEST_UNREACH &&
503                                       (code == ICMP_NET_UNREACH ||
504                                        code == ICMP_HOST_UNREACH)));
505                 return 0;
506         }
507
508         bh_lock_sock(sk);
509         /* If too many ICMPs get dropped on busy
510          * servers this needs to be solved differently.
511          * We do take care of PMTU discovery (RFC1191) special case :
512          * we can receive locally generated ICMP messages while socket is held.
513          */
514         if (sock_owned_by_user(sk)) {
515                 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
516                         __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
517         }
518         if (sk->sk_state == TCP_CLOSE)
519                 goto out;
520
521         if (static_branch_unlikely(&ip4_min_ttl)) {
522                 /* min_ttl can be changed concurrently from do_ip_setsockopt() */
523                 if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) {
524                         __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
525                         goto out;
526                 }
527         }
528
529         tp = tcp_sk(sk);
530         /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
531         fastopen = rcu_dereference(tp->fastopen_rsk);
532         snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
533         if (sk->sk_state != TCP_LISTEN &&
534             !between(seq, snd_una, tp->snd_nxt)) {
535                 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
536                 goto out;
537         }
538
539         switch (type) {
540         case ICMP_REDIRECT:
541                 if (!sock_owned_by_user(sk))
542                         do_redirect(skb, sk);
543                 goto out;
544         case ICMP_SOURCE_QUENCH:
545                 /* Just silently ignore these. */
546                 goto out;
547         case ICMP_PARAMETERPROB:
548                 err = EPROTO;
549                 break;
550         case ICMP_DEST_UNREACH:
551                 if (code > NR_ICMP_UNREACH)
552                         goto out;
553
554                 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
555                         /* We are not interested in TCP_LISTEN and open_requests
556                          * (SYN-ACKs send out by Linux are always <576bytes so
557                          * they should go through unfragmented).
558                          */
559                         if (sk->sk_state == TCP_LISTEN)
560                                 goto out;
561
562                         WRITE_ONCE(tp->mtu_info, info);
563                         if (!sock_owned_by_user(sk)) {
564                                 tcp_v4_mtu_reduced(sk);
565                         } else {
566                                 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
567                                         sock_hold(sk);
568                         }
569                         goto out;
570                 }
571
572                 err = icmp_err_convert[code].errno;
573                 /* check if this ICMP message allows revert of backoff.
574                  * (see RFC 6069)
575                  */
576                 if (!fastopen &&
577                     (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))
578                         tcp_ld_RTO_revert(sk, seq);
579                 break;
580         case ICMP_TIME_EXCEEDED:
581                 err = EHOSTUNREACH;
582                 break;
583         default:
584                 goto out;
585         }
586
587         switch (sk->sk_state) {
588         case TCP_SYN_SENT:
589         case TCP_SYN_RECV:
590                 /* Only in fast or simultaneous open. If a fast open socket is
591                  * already accepted it is treated as a connected one below.
592                  */
593                 if (fastopen && !fastopen->sk)
594                         break;
595
596                 ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);
597
598                 if (!sock_owned_by_user(sk)) {
599                         WRITE_ONCE(sk->sk_err, err);
600
601                         sk_error_report(sk);
602
603                         tcp_done(sk);
604                 } else {
605                         WRITE_ONCE(sk->sk_err_soft, err);
606                 }
607                 goto out;
608         }
609
610         /* If we've already connected we will keep trying
611          * until we time out, or the user gives up.
612          *
613          * rfc1122 4.2.3.9 allows to consider as hard errors
614          * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
615          * but it is obsoleted by pmtu discovery).
616          *
617          * Note, that in modern internet, where routing is unreliable
618          * and in each dark corner broken firewalls sit, sending random
619          * errors ordered by their masters even this two messages finally lose
620          * their original sense (even Linux sends invalid PORT_UNREACHs)
621          *
622          * Now we are in compliance with RFCs.
623          *                                                      --ANK (980905)
624          */
625
626         inet = inet_sk(sk);
627         if (!sock_owned_by_user(sk) && inet->recverr) {
628                 WRITE_ONCE(sk->sk_err, err);
629                 sk_error_report(sk);
630         } else  { /* Only an error on timeout */
631                 WRITE_ONCE(sk->sk_err_soft, err);
632         }
633
634 out:
635         bh_unlock_sock(sk);
636         sock_put(sk);
637         return 0;
638 }
639
640 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
641 {
642         struct tcphdr *th = tcp_hdr(skb);
643
644         th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
645         skb->csum_start = skb_transport_header(skb) - skb->head;
646         skb->csum_offset = offsetof(struct tcphdr, check);
647 }
648
649 /* This routine computes an IPv4 TCP checksum. */
650 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
651 {
652         const struct inet_sock *inet = inet_sk(sk);
653
654         __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
655 }
656 EXPORT_SYMBOL(tcp_v4_send_check);
657
658 /*
659  *      This routine will send an RST to the other tcp.
660  *
661  *      Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
662  *                    for reset.
663  *      Answer: if a packet caused RST, it is not for a socket
664  *              existing in our system, if it is matched to a socket,
665  *              it is just duplicate segment or bug in other side's TCP.
666  *              So that we build reply only basing on parameters
667  *              arrived with segment.
668  *      Exception: precedence violation. We do not implement it in any case.
669  */
670
671 #ifdef CONFIG_TCP_MD5SIG
672 #define OPTION_BYTES TCPOLEN_MD5SIG_ALIGNED
673 #else
674 #define OPTION_BYTES sizeof(__be32)
675 #endif
676
677 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
678 {
679         const struct tcphdr *th = tcp_hdr(skb);
680         struct {
681                 struct tcphdr th;
682                 __be32 opt[OPTION_BYTES / sizeof(__be32)];
683         } rep;
684         struct ip_reply_arg arg;
685 #ifdef CONFIG_TCP_MD5SIG
686         struct tcp_md5sig_key *key = NULL;
687         const __u8 *hash_location = NULL;
688         unsigned char newhash[16];
689         int genhash;
690         struct sock *sk1 = NULL;
691 #endif
692         u64 transmit_time = 0;
693         struct sock *ctl_sk;
694         struct net *net;
695
696         /* Never send a reset in response to a reset. */
697         if (th->rst)
698                 return;
699
700         /* If sk not NULL, it means we did a successful lookup and incoming
701          * route had to be correct. prequeue might have dropped our dst.
702          */
703         if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
704                 return;
705
706         /* Swap the send and the receive. */
707         memset(&rep, 0, sizeof(rep));
708         rep.th.dest   = th->source;
709         rep.th.source = th->dest;
710         rep.th.doff   = sizeof(struct tcphdr) / 4;
711         rep.th.rst    = 1;
712
713         if (th->ack) {
714                 rep.th.seq = th->ack_seq;
715         } else {
716                 rep.th.ack = 1;
717                 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
718                                        skb->len - (th->doff << 2));
719         }
720
721         memset(&arg, 0, sizeof(arg));
722         arg.iov[0].iov_base = (unsigned char *)&rep;
723         arg.iov[0].iov_len  = sizeof(rep.th);
724
725         net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
726 #ifdef CONFIG_TCP_MD5SIG
727         rcu_read_lock();
728         hash_location = tcp_parse_md5sig_option(th);
729         if (sk && sk_fullsock(sk)) {
730                 const union tcp_md5_addr *addr;
731                 int l3index;
732
733                 /* sdif set, means packet ingressed via a device
734                  * in an L3 domain and inet_iif is set to it.
735                  */
736                 l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
737                 addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
738                 key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
739         } else if (hash_location) {
740                 const union tcp_md5_addr *addr;
741                 int sdif = tcp_v4_sdif(skb);
742                 int dif = inet_iif(skb);
743                 int l3index;
744
745                 /*
746                  * active side is lost. Try to find listening socket through
747                  * source port, and then find md5 key through listening socket.
748                  * we are not loose security here:
749                  * Incoming packet is checked with md5 hash with finding key,
750                  * no RST generated if md5 hash doesn't match.
751                  */
752                 sk1 = __inet_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo,
753                                              NULL, 0, ip_hdr(skb)->saddr,
754                                              th->source, ip_hdr(skb)->daddr,
755                                              ntohs(th->source), dif, sdif);
756                 /* don't send rst if it can't find key */
757                 if (!sk1)
758                         goto out;
759
760                 /* sdif set, means packet ingressed via a device
761                  * in an L3 domain and dif is set to it.
762                  */
763                 l3index = sdif ? dif : 0;
764                 addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
765                 key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET);
766                 if (!key)
767                         goto out;
768
769
770                 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
771                 if (genhash || memcmp(hash_location, newhash, 16) != 0)
772                         goto out;
773
774         }
775
776         if (key) {
777                 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
778                                    (TCPOPT_NOP << 16) |
779                                    (TCPOPT_MD5SIG << 8) |
780                                    TCPOLEN_MD5SIG);
781                 /* Update length and the length the header thinks exists */
782                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
783                 rep.th.doff = arg.iov[0].iov_len / 4;
784
785                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
786                                      key, ip_hdr(skb)->saddr,
787                                      ip_hdr(skb)->daddr, &rep.th);
788         }
789 #endif
790         /* Can't co-exist with TCPMD5, hence check rep.opt[0] */
791         if (rep.opt[0] == 0) {
792                 __be32 mrst = mptcp_reset_option(skb);
793
794                 if (mrst) {
795                         rep.opt[0] = mrst;
796                         arg.iov[0].iov_len += sizeof(mrst);
797                         rep.th.doff = arg.iov[0].iov_len / 4;
798                 }
799         }
800
801         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
802                                       ip_hdr(skb)->saddr, /* XXX */
803                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
804         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
805         arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
806
807         /* When socket is gone, all binding information is lost.
808          * routing might fail in this case. No choice here, if we choose to force
809          * input interface, we will misroute in case of asymmetric route.
810          */
811         if (sk) {
812                 arg.bound_dev_if = sk->sk_bound_dev_if;
813                 if (sk_fullsock(sk))
814                         trace_tcp_send_reset(sk, skb);
815         }
816
817         BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
818                      offsetof(struct inet_timewait_sock, tw_bound_dev_if));
819
820         arg.tos = ip_hdr(skb)->tos;
821         arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
822         local_bh_disable();
823         ctl_sk = this_cpu_read(ipv4_tcp_sk);
824         sock_net_set(ctl_sk, net);
825         if (sk) {
826                 ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
827                                    inet_twsk(sk)->tw_mark : sk->sk_mark;
828                 ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
829                                    inet_twsk(sk)->tw_priority : sk->sk_priority;
830                 transmit_time = tcp_transmit_time(sk);
831                 xfrm_sk_clone_policy(ctl_sk, sk);
832         } else {
833                 ctl_sk->sk_mark = 0;
834                 ctl_sk->sk_priority = 0;
835         }
836         ip_send_unicast_reply(ctl_sk,
837                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
838                               ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
839                               &arg, arg.iov[0].iov_len,
840                               transmit_time);
841
842         xfrm_sk_free_policy(ctl_sk);
843         sock_net_set(ctl_sk, &init_net);
844         __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
845         __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
846         local_bh_enable();
847
848 #ifdef CONFIG_TCP_MD5SIG
849 out:
850         rcu_read_unlock();
851 #endif
852 }
853
854 /* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
855    outside socket context is ugly, certainly. What can I do?
856  */
857
858 static void tcp_v4_send_ack(const struct sock *sk,
859                             struct sk_buff *skb, u32 seq, u32 ack,
860                             u32 win, u32 tsval, u32 tsecr, int oif,
861                             struct tcp_md5sig_key *key,
862                             int reply_flags, u8 tos)
863 {
864         const struct tcphdr *th = tcp_hdr(skb);
865         struct {
866                 struct tcphdr th;
867                 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
868 #ifdef CONFIG_TCP_MD5SIG
869                            + (TCPOLEN_MD5SIG_ALIGNED >> 2)
870 #endif
871                         ];
872         } rep;
873         struct net *net = sock_net(sk);
874         struct ip_reply_arg arg;
875         struct sock *ctl_sk;
876         u64 transmit_time;
877
878         memset(&rep.th, 0, sizeof(struct tcphdr));
879         memset(&arg, 0, sizeof(arg));
880
881         arg.iov[0].iov_base = (unsigned char *)&rep;
882         arg.iov[0].iov_len  = sizeof(rep.th);
883         if (tsecr) {
884                 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
885                                    (TCPOPT_TIMESTAMP << 8) |
886                                    TCPOLEN_TIMESTAMP);
887                 rep.opt[1] = htonl(tsval);
888                 rep.opt[2] = htonl(tsecr);
889                 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
890         }
891
892         /* Swap the send and the receive. */
893         rep.th.dest    = th->source;
894         rep.th.source  = th->dest;
895         rep.th.doff    = arg.iov[0].iov_len / 4;
896         rep.th.seq     = htonl(seq);
897         rep.th.ack_seq = htonl(ack);
898         rep.th.ack     = 1;
899         rep.th.window  = htons(win);
900
901 #ifdef CONFIG_TCP_MD5SIG
902         if (key) {
903                 int offset = (tsecr) ? 3 : 0;
904
905                 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
906                                           (TCPOPT_NOP << 16) |
907                                           (TCPOPT_MD5SIG << 8) |
908                                           TCPOLEN_MD5SIG);
909                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
910                 rep.th.doff = arg.iov[0].iov_len/4;
911
912                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
913                                     key, ip_hdr(skb)->saddr,
914                                     ip_hdr(skb)->daddr, &rep.th);
915         }
916 #endif
917         arg.flags = reply_flags;
918         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
919                                       ip_hdr(skb)->saddr, /* XXX */
920                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
921         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
922         if (oif)
923                 arg.bound_dev_if = oif;
924         arg.tos = tos;
925         arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
926         local_bh_disable();
927         ctl_sk = this_cpu_read(ipv4_tcp_sk);
928         sock_net_set(ctl_sk, net);
929         ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
930                            inet_twsk(sk)->tw_mark : sk->sk_mark;
931         ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
932                            inet_twsk(sk)->tw_priority : sk->sk_priority;
933         transmit_time = tcp_transmit_time(sk);
934         ip_send_unicast_reply(ctl_sk,
935                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
936                               ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
937                               &arg, arg.iov[0].iov_len,
938                               transmit_time);
939
940         sock_net_set(ctl_sk, &init_net);
941         __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
942         local_bh_enable();
943 }
944
945 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
946 {
947         struct inet_timewait_sock *tw = inet_twsk(sk);
948         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
949
950         tcp_v4_send_ack(sk, skb,
951                         tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
952                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
953                         tcp_time_stamp_raw() + tcptw->tw_ts_offset,
954                         tcptw->tw_ts_recent,
955                         tw->tw_bound_dev_if,
956                         tcp_twsk_md5_key(tcptw),
957                         tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
958                         tw->tw_tos
959                         );
960
961         inet_twsk_put(tw);
962 }
963
964 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
965                                   struct request_sock *req)
966 {
967         const union tcp_md5_addr *addr;
968         int l3index;
969
970         /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
971          * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
972          */
973         u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
974                                              tcp_sk(sk)->snd_nxt;
975
976         /* RFC 7323 2.3
977          * The window field (SEG.WND) of every outgoing segment, with the
978          * exception of <SYN> segments, MUST be right-shifted by
979          * Rcv.Wind.Shift bits:
980          */
981         addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
982         l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
983         tcp_v4_send_ack(sk, skb, seq,
984                         tcp_rsk(req)->rcv_nxt,
985                         req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
986                         tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
987                         req->ts_recent,
988                         0,
989                         tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
990                         inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
991                         ip_hdr(skb)->tos);
992 }
993
994 /*
995  *      Send a SYN-ACK after having received a SYN.
996  *      This still operates on a request_sock only, not on a big
997  *      socket.
998  */
999 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
1000                               struct flowi *fl,
1001                               struct request_sock *req,
1002                               struct tcp_fastopen_cookie *foc,
1003                               enum tcp_synack_type synack_type,
1004                               struct sk_buff *syn_skb)
1005 {
1006         const struct inet_request_sock *ireq = inet_rsk(req);
1007         struct flowi4 fl4;
1008         int err = -1;
1009         struct sk_buff *skb;
1010         u8 tos;
1011
1012         /* First, grab a route. */
1013         if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
1014                 return -1;
1015
1016         skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
1017
1018         if (skb) {
1019                 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
1020
1021                 tos = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos) ?
1022                                 (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
1023                                 (inet_sk(sk)->tos & INET_ECN_MASK) :
1024                                 inet_sk(sk)->tos;
1025
1026                 if (!INET_ECN_is_capable(tos) &&
1027                     tcp_bpf_ca_needs_ecn((struct sock *)req))
1028                         tos |= INET_ECN_ECT_0;
1029
1030                 rcu_read_lock();
1031                 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
1032                                             ireq->ir_rmt_addr,
1033                                             rcu_dereference(ireq->ireq_opt),
1034                                             tos);
1035                 rcu_read_unlock();
1036                 err = net_xmit_eval(err);
1037         }
1038
1039         return err;
1040 }
1041
1042 /*
1043  *      IPv4 request_sock destructor.
1044  */
1045 static void tcp_v4_reqsk_destructor(struct request_sock *req)
1046 {
1047         kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
1048 }
1049
1050 #ifdef CONFIG_TCP_MD5SIG
1051 /*
1052  * RFC2385 MD5 checksumming requires a mapping of
1053  * IP address->MD5 Key.
1054  * We need to maintain these in the sk structure.
1055  */
1056
1057 DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_md5_needed, HZ);
1058 EXPORT_SYMBOL(tcp_md5_needed);
1059
1060 static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
1061 {
1062         if (!old)
1063                 return true;
1064
1065         /* l3index always overrides non-l3index */
1066         if (old->l3index && new->l3index == 0)
1067                 return false;
1068         if (old->l3index == 0 && new->l3index)
1069                 return true;
1070
1071         return old->prefixlen < new->prefixlen;
1072 }
1073
1074 /* Find the Key structure for an address.  */
1075 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1076                                            const union tcp_md5_addr *addr,
1077                                            int family)
1078 {
1079         const struct tcp_sock *tp = tcp_sk(sk);
1080         struct tcp_md5sig_key *key;
1081         const struct tcp_md5sig_info *md5sig;
1082         __be32 mask;
1083         struct tcp_md5sig_key *best_match = NULL;
1084         bool match;
1085
1086         /* caller either holds rcu_read_lock() or socket lock */
1087         md5sig = rcu_dereference_check(tp->md5sig_info,
1088                                        lockdep_sock_is_held(sk));
1089         if (!md5sig)
1090                 return NULL;
1091
1092         hlist_for_each_entry_rcu(key, &md5sig->head, node,
1093                                  lockdep_sock_is_held(sk)) {
1094                 if (key->family != family)
1095                         continue;
1096                 if (key->flags & TCP_MD5SIG_FLAG_IFINDEX && key->l3index != l3index)
1097                         continue;
1098                 if (family == AF_INET) {
1099                         mask = inet_make_mask(key->prefixlen);
1100                         match = (key->addr.a4.s_addr & mask) ==
1101                                 (addr->a4.s_addr & mask);
1102 #if IS_ENABLED(CONFIG_IPV6)
1103                 } else if (family == AF_INET6) {
1104                         match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
1105                                                   key->prefixlen);
1106 #endif
1107                 } else {
1108                         match = false;
1109                 }
1110
1111                 if (match && better_md5_match(best_match, key))
1112                         best_match = key;
1113         }
1114         return best_match;
1115 }
1116 EXPORT_SYMBOL(__tcp_md5_do_lookup);
1117
1118 static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
1119                                                       const union tcp_md5_addr *addr,
1120                                                       int family, u8 prefixlen,
1121                                                       int l3index, u8 flags)
1122 {
1123         const struct tcp_sock *tp = tcp_sk(sk);
1124         struct tcp_md5sig_key *key;
1125         unsigned int size = sizeof(struct in_addr);
1126         const struct tcp_md5sig_info *md5sig;
1127
1128         /* caller either holds rcu_read_lock() or socket lock */
1129         md5sig = rcu_dereference_check(tp->md5sig_info,
1130                                        lockdep_sock_is_held(sk));
1131         if (!md5sig)
1132                 return NULL;
1133 #if IS_ENABLED(CONFIG_IPV6)
1134         if (family == AF_INET6)
1135                 size = sizeof(struct in6_addr);
1136 #endif
1137         hlist_for_each_entry_rcu(key, &md5sig->head, node,
1138                                  lockdep_sock_is_held(sk)) {
1139                 if (key->family != family)
1140                         continue;
1141                 if ((key->flags & TCP_MD5SIG_FLAG_IFINDEX) != (flags & TCP_MD5SIG_FLAG_IFINDEX))
1142                         continue;
1143                 if (key->l3index != l3index)
1144                         continue;
1145                 if (!memcmp(&key->addr, addr, size) &&
1146                     key->prefixlen == prefixlen)
1147                         return key;
1148         }
1149         return NULL;
1150 }
1151
1152 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1153                                          const struct sock *addr_sk)
1154 {
1155         const union tcp_md5_addr *addr;
1156         int l3index;
1157
1158         l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
1159                                                  addr_sk->sk_bound_dev_if);
1160         addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
1161         return tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1162 }
1163 EXPORT_SYMBOL(tcp_v4_md5_lookup);
1164
1165 static int tcp_md5sig_info_add(struct sock *sk, gfp_t gfp)
1166 {
1167         struct tcp_sock *tp = tcp_sk(sk);
1168         struct tcp_md5sig_info *md5sig;
1169
1170         md5sig = kmalloc(sizeof(*md5sig), gfp);
1171         if (!md5sig)
1172                 return -ENOMEM;
1173
1174         sk_gso_disable(sk);
1175         INIT_HLIST_HEAD(&md5sig->head);
1176         rcu_assign_pointer(tp->md5sig_info, md5sig);
1177         return 0;
1178 }
1179
1180 /* This can be called on a newly created socket, from other files */
1181 static int __tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1182                             int family, u8 prefixlen, int l3index, u8 flags,
1183                             const u8 *newkey, u8 newkeylen, gfp_t gfp)
1184 {
1185         /* Add Key to the list */
1186         struct tcp_md5sig_key *key;
1187         struct tcp_sock *tp = tcp_sk(sk);
1188         struct tcp_md5sig_info *md5sig;
1189
1190         key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
1191         if (key) {
1192                 /* Pre-existing entry - just update that one.
1193                  * Note that the key might be used concurrently.
1194                  * data_race() is telling kcsan that we do not care of
1195                  * key mismatches, since changing MD5 key on live flows
1196                  * can lead to packet drops.
1197                  */
1198                 data_race(memcpy(key->key, newkey, newkeylen));
1199
1200                 /* Pairs with READ_ONCE() in tcp_md5_hash_key().
1201                  * Also note that a reader could catch new key->keylen value
1202                  * but old key->key[], this is the reason we use __GFP_ZERO
1203                  * at sock_kmalloc() time below these lines.
1204                  */
1205                 WRITE_ONCE(key->keylen, newkeylen);
1206
1207                 return 0;
1208         }
1209
1210         md5sig = rcu_dereference_protected(tp->md5sig_info,
1211                                            lockdep_sock_is_held(sk));
1212
1213         key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
1214         if (!key)
1215                 return -ENOMEM;
1216         if (!tcp_alloc_md5sig_pool()) {
1217                 sock_kfree_s(sk, key, sizeof(*key));
1218                 return -ENOMEM;
1219         }
1220
1221         memcpy(key->key, newkey, newkeylen);
1222         key->keylen = newkeylen;
1223         key->family = family;
1224         key->prefixlen = prefixlen;
1225         key->l3index = l3index;
1226         key->flags = flags;
1227         memcpy(&key->addr, addr,
1228                (IS_ENABLED(CONFIG_IPV6) && family == AF_INET6) ? sizeof(struct in6_addr) :
1229                                                                  sizeof(struct in_addr));
1230         hlist_add_head_rcu(&key->node, &md5sig->head);
1231         return 0;
1232 }
1233
1234 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1235                    int family, u8 prefixlen, int l3index, u8 flags,
1236                    const u8 *newkey, u8 newkeylen)
1237 {
1238         struct tcp_sock *tp = tcp_sk(sk);
1239
1240         if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) {
1241                 if (tcp_md5sig_info_add(sk, GFP_KERNEL))
1242                         return -ENOMEM;
1243
1244                 if (!static_branch_inc(&tcp_md5_needed.key)) {
1245                         struct tcp_md5sig_info *md5sig;
1246
1247                         md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk));
1248                         rcu_assign_pointer(tp->md5sig_info, NULL);
1249                         kfree_rcu(md5sig, rcu);
1250                         return -EUSERS;
1251                 }
1252         }
1253
1254         return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index, flags,
1255                                 newkey, newkeylen, GFP_KERNEL);
1256 }
1257 EXPORT_SYMBOL(tcp_md5_do_add);
1258
1259 int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
1260                      int family, u8 prefixlen, int l3index,
1261                      struct tcp_md5sig_key *key)
1262 {
1263         struct tcp_sock *tp = tcp_sk(sk);
1264
1265         if (!rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk))) {
1266                 if (tcp_md5sig_info_add(sk, sk_gfp_mask(sk, GFP_ATOMIC)))
1267                         return -ENOMEM;
1268
1269                 if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key)) {
1270                         struct tcp_md5sig_info *md5sig;
1271
1272                         md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk));
1273                         net_warn_ratelimited("Too many TCP-MD5 keys in the system\n");
1274                         rcu_assign_pointer(tp->md5sig_info, NULL);
1275                         kfree_rcu(md5sig, rcu);
1276                         return -EUSERS;
1277                 }
1278         }
1279
1280         return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index,
1281                                 key->flags, key->key, key->keylen,
1282                                 sk_gfp_mask(sk, GFP_ATOMIC));
1283 }
1284 EXPORT_SYMBOL(tcp_md5_key_copy);
1285
1286 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
1287                    u8 prefixlen, int l3index, u8 flags)
1288 {
1289         struct tcp_md5sig_key *key;
1290
1291         key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
1292         if (!key)
1293                 return -ENOENT;
1294         hlist_del_rcu(&key->node);
1295         atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1296         kfree_rcu(key, rcu);
1297         return 0;
1298 }
1299 EXPORT_SYMBOL(tcp_md5_do_del);
1300
1301 static void tcp_clear_md5_list(struct sock *sk)
1302 {
1303         struct tcp_sock *tp = tcp_sk(sk);
1304         struct tcp_md5sig_key *key;
1305         struct hlist_node *n;
1306         struct tcp_md5sig_info *md5sig;
1307
1308         md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1309
1310         hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1311                 hlist_del_rcu(&key->node);
1312                 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1313                 kfree_rcu(key, rcu);
1314         }
1315 }
1316
1317 static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1318                                  sockptr_t optval, int optlen)
1319 {
1320         struct tcp_md5sig cmd;
1321         struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1322         const union tcp_md5_addr *addr;
1323         u8 prefixlen = 32;
1324         int l3index = 0;
1325         u8 flags;
1326
1327         if (optlen < sizeof(cmd))
1328                 return -EINVAL;
1329
1330         if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
1331                 return -EFAULT;
1332
1333         if (sin->sin_family != AF_INET)
1334                 return -EINVAL;
1335
1336         flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
1337
1338         if (optname == TCP_MD5SIG_EXT &&
1339             cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1340                 prefixlen = cmd.tcpm_prefixlen;
1341                 if (prefixlen > 32)
1342                         return -EINVAL;
1343         }
1344
1345         if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
1346             cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
1347                 struct net_device *dev;
1348
1349                 rcu_read_lock();
1350                 dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
1351                 if (dev && netif_is_l3_master(dev))
1352                         l3index = dev->ifindex;
1353
1354                 rcu_read_unlock();
1355
1356                 /* ok to reference set/not set outside of rcu;
1357                  * right now device MUST be an L3 master
1358                  */
1359                 if (!dev || !l3index)
1360                         return -EINVAL;
1361         }
1362
1363         addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;
1364
1365         if (!cmd.tcpm_keylen)
1366                 return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index, flags);
1367
1368         if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1369                 return -EINVAL;
1370
1371         return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags,
1372                               cmd.tcpm_key, cmd.tcpm_keylen);
1373 }
1374
1375 static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1376                                    __be32 daddr, __be32 saddr,
1377                                    const struct tcphdr *th, int nbytes)
1378 {
1379         struct tcp4_pseudohdr *bp;
1380         struct scatterlist sg;
1381         struct tcphdr *_th;
1382
1383         bp = hp->scratch;
1384         bp->saddr = saddr;
1385         bp->daddr = daddr;
1386         bp->pad = 0;
1387         bp->protocol = IPPROTO_TCP;
1388         bp->len = cpu_to_be16(nbytes);
1389
1390         _th = (struct tcphdr *)(bp + 1);
1391         memcpy(_th, th, sizeof(*th));
1392         _th->check = 0;
1393
1394         sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1395         ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1396                                 sizeof(*bp) + sizeof(*th));
1397         return crypto_ahash_update(hp->md5_req);
1398 }
1399
1400 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1401                                __be32 daddr, __be32 saddr, const struct tcphdr *th)
1402 {
1403         struct tcp_md5sig_pool *hp;
1404         struct ahash_request *req;
1405
1406         hp = tcp_get_md5sig_pool();
1407         if (!hp)
1408                 goto clear_hash_noput;
1409         req = hp->md5_req;
1410
1411         if (crypto_ahash_init(req))
1412                 goto clear_hash;
1413         if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1414                 goto clear_hash;
1415         if (tcp_md5_hash_key(hp, key))
1416                 goto clear_hash;
1417         ahash_request_set_crypt(req, NULL, md5_hash, 0);
1418         if (crypto_ahash_final(req))
1419                 goto clear_hash;
1420
1421         tcp_put_md5sig_pool();
1422         return 0;
1423
1424 clear_hash:
1425         tcp_put_md5sig_pool();
1426 clear_hash_noput:
1427         memset(md5_hash, 0, 16);
1428         return 1;
1429 }
1430
1431 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1432                         const struct sock *sk,
1433                         const struct sk_buff *skb)
1434 {
1435         struct tcp_md5sig_pool *hp;
1436         struct ahash_request *req;
1437         const struct tcphdr *th = tcp_hdr(skb);
1438         __be32 saddr, daddr;
1439
1440         if (sk) { /* valid for establish/request sockets */
1441                 saddr = sk->sk_rcv_saddr;
1442                 daddr = sk->sk_daddr;
1443         } else {
1444                 const struct iphdr *iph = ip_hdr(skb);
1445                 saddr = iph->saddr;
1446                 daddr = iph->daddr;
1447         }
1448
1449         hp = tcp_get_md5sig_pool();
1450         if (!hp)
1451                 goto clear_hash_noput;
1452         req = hp->md5_req;
1453
1454         if (crypto_ahash_init(req))
1455                 goto clear_hash;
1456
1457         if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1458                 goto clear_hash;
1459         if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1460                 goto clear_hash;
1461         if (tcp_md5_hash_key(hp, key))
1462                 goto clear_hash;
1463         ahash_request_set_crypt(req, NULL, md5_hash, 0);
1464         if (crypto_ahash_final(req))
1465                 goto clear_hash;
1466
1467         tcp_put_md5sig_pool();
1468         return 0;
1469
1470 clear_hash:
1471         tcp_put_md5sig_pool();
1472 clear_hash_noput:
1473         memset(md5_hash, 0, 16);
1474         return 1;
1475 }
1476 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1477
1478 #endif
1479
1480 static void tcp_v4_init_req(struct request_sock *req,
1481                             const struct sock *sk_listener,
1482                             struct sk_buff *skb)
1483 {
1484         struct inet_request_sock *ireq = inet_rsk(req);
1485         struct net *net = sock_net(sk_listener);
1486
1487         sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1488         sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1489         RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
1490 }
1491
1492 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1493                                           struct sk_buff *skb,
1494                                           struct flowi *fl,
1495                                           struct request_sock *req)
1496 {
1497         tcp_v4_init_req(req, sk, skb);
1498
1499         if (security_inet_conn_request(sk, skb, req))
1500                 return NULL;
1501
1502         return inet_csk_route_req(sk, &fl->u.ip4, req);
1503 }
1504
1505 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1506         .family         =       PF_INET,
1507         .obj_size       =       sizeof(struct tcp_request_sock),
1508         .rtx_syn_ack    =       tcp_rtx_synack,
1509         .send_ack       =       tcp_v4_reqsk_send_ack,
1510         .destructor     =       tcp_v4_reqsk_destructor,
1511         .send_reset     =       tcp_v4_send_reset,
1512         .syn_ack_timeout =      tcp_syn_ack_timeout,
1513 };
1514
1515 const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1516         .mss_clamp      =       TCP_MSS_DEFAULT,
1517 #ifdef CONFIG_TCP_MD5SIG
1518         .req_md5_lookup =       tcp_v4_md5_lookup,
1519         .calc_md5_hash  =       tcp_v4_md5_hash_skb,
1520 #endif
1521 #ifdef CONFIG_SYN_COOKIES
1522         .cookie_init_seq =      cookie_v4_init_sequence,
1523 #endif
1524         .route_req      =       tcp_v4_route_req,
1525         .init_seq       =       tcp_v4_init_seq,
1526         .init_ts_off    =       tcp_v4_init_ts_off,
1527         .send_synack    =       tcp_v4_send_synack,
1528 };
1529
1530 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1531 {
1532         /* Never answer to SYNs send to broadcast or multicast */
1533         if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1534                 goto drop;
1535
1536         return tcp_conn_request(&tcp_request_sock_ops,
1537                                 &tcp_request_sock_ipv4_ops, sk, skb);
1538
1539 drop:
1540         tcp_listendrop(sk);
1541         return 0;
1542 }
1543 EXPORT_SYMBOL(tcp_v4_conn_request);
1544
1545
1546 /*
1547  * The three way handshake has completed - we got a valid synack -
1548  * now create the new socket.
1549  */
1550 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1551                                   struct request_sock *req,
1552                                   struct dst_entry *dst,
1553                                   struct request_sock *req_unhash,
1554                                   bool *own_req)
1555 {
1556         struct inet_request_sock *ireq;
1557         bool found_dup_sk = false;
1558         struct inet_sock *newinet;
1559         struct tcp_sock *newtp;
1560         struct sock *newsk;
1561 #ifdef CONFIG_TCP_MD5SIG
1562         const union tcp_md5_addr *addr;
1563         struct tcp_md5sig_key *key;
1564         int l3index;
1565 #endif
1566         struct ip_options_rcu *inet_opt;
1567
1568         if (sk_acceptq_is_full(sk))
1569                 goto exit_overflow;
1570
1571         newsk = tcp_create_openreq_child(sk, req, skb);
1572         if (!newsk)
1573                 goto exit_nonewsk;
1574
1575         newsk->sk_gso_type = SKB_GSO_TCPV4;
1576         inet_sk_rx_dst_set(newsk, skb);
1577
1578         newtp                 = tcp_sk(newsk);
1579         newinet               = inet_sk(newsk);
1580         ireq                  = inet_rsk(req);
1581         sk_daddr_set(newsk, ireq->ir_rmt_addr);
1582         sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1583         newsk->sk_bound_dev_if = ireq->ir_iif;
1584         newinet->inet_saddr   = ireq->ir_loc_addr;
1585         inet_opt              = rcu_dereference(ireq->ireq_opt);
1586         RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1587         newinet->mc_index     = inet_iif(skb);
1588         newinet->mc_ttl       = ip_hdr(skb)->ttl;
1589         newinet->rcv_tos      = ip_hdr(skb)->tos;
1590         inet_csk(newsk)->icsk_ext_hdr_len = 0;
1591         if (inet_opt)
1592                 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1593         newinet->inet_id = get_random_u16();
1594
1595         /* Set ToS of the new socket based upon the value of incoming SYN.
1596          * ECT bits are set later in tcp_init_transfer().
1597          */
1598         if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reflect_tos))
1599                 newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
1600
1601         if (!dst) {
1602                 dst = inet_csk_route_child_sock(sk, newsk, req);
1603                 if (!dst)
1604                         goto put_and_exit;
1605         } else {
1606                 /* syncookie case : see end of cookie_v4_check() */
1607         }
1608         sk_setup_caps(newsk, dst);
1609
1610         tcp_ca_openreq_child(newsk, dst);
1611
1612         tcp_sync_mss(newsk, dst_mtu(dst));
1613         newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
1614
1615         tcp_initialize_rcv_mss(newsk);
1616
1617 #ifdef CONFIG_TCP_MD5SIG
1618         l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1619         /* Copy over the MD5 key from the original socket */
1620         addr = (union tcp_md5_addr *)&newinet->inet_daddr;
1621         key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
1622         if (key) {
1623                 if (tcp_md5_key_copy(newsk, addr, AF_INET, 32, l3index, key))
1624                         goto put_and_exit;
1625                 sk_gso_disable(newsk);
1626         }
1627 #endif
1628
1629         if (__inet_inherit_port(sk, newsk) < 0)
1630                 goto put_and_exit;
1631         *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1632                                        &found_dup_sk);
1633         if (likely(*own_req)) {
1634                 tcp_move_syn(newtp, req);
1635                 ireq->ireq_opt = NULL;
1636         } else {
1637                 newinet->inet_opt = NULL;
1638
1639                 if (!req_unhash && found_dup_sk) {
1640                         /* This code path should only be executed in the
1641                          * syncookie case only
1642                          */
1643                         bh_unlock_sock(newsk);
1644                         sock_put(newsk);
1645                         newsk = NULL;
1646                 }
1647         }
1648         return newsk;
1649
1650 exit_overflow:
1651         NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1652 exit_nonewsk:
1653         dst_release(dst);
1654 exit:
1655         tcp_listendrop(sk);
1656         return NULL;
1657 put_and_exit:
1658         newinet->inet_opt = NULL;
1659         inet_csk_prepare_forced_close(newsk);
1660         tcp_done(newsk);
1661         goto exit;
1662 }
1663 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1664
1665 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1666 {
1667 #ifdef CONFIG_SYN_COOKIES
1668         const struct tcphdr *th = tcp_hdr(skb);
1669
1670         if (!th->syn)
1671                 sk = cookie_v4_check(sk, skb);
1672 #endif
1673         return sk;
1674 }
1675
1676 u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
1677                          struct tcphdr *th, u32 *cookie)
1678 {
1679         u16 mss = 0;
1680 #ifdef CONFIG_SYN_COOKIES
1681         mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
1682                                     &tcp_request_sock_ipv4_ops, sk, th);
1683         if (mss) {
1684                 *cookie = __cookie_v4_init_sequence(iph, th, &mss);
1685                 tcp_synq_overflow(sk);
1686         }
1687 #endif
1688         return mss;
1689 }
1690
1691 INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
1692                                                            u32));
1693 /* The socket must have it's spinlock held when we get
1694  * here, unless it is a TCP_LISTEN socket.
1695  *
1696  * We have a potential double-lock case here, so even when
1697  * doing backlog processing we use the BH locking scheme.
1698  * This is because we cannot sleep with the original spinlock
1699  * held.
1700  */
1701 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1702 {
1703         enum skb_drop_reason reason;
1704         struct sock *rsk;
1705
1706         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1707                 struct dst_entry *dst;
1708
1709                 dst = rcu_dereference_protected(sk->sk_rx_dst,
1710                                                 lockdep_sock_is_held(sk));
1711
1712                 sock_rps_save_rxhash(sk, skb);
1713                 sk_mark_napi_id(sk, skb);
1714                 if (dst) {
1715                         if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
1716                             !INDIRECT_CALL_1(dst->ops->check, ipv4_dst_check,
1717                                              dst, 0)) {
1718                                 RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
1719                                 dst_release(dst);
1720                         }
1721                 }
1722                 tcp_rcv_established(sk, skb);
1723                 return 0;
1724         }
1725
1726         reason = SKB_DROP_REASON_NOT_SPECIFIED;
1727         if (tcp_checksum_complete(skb))
1728                 goto csum_err;
1729
1730         if (sk->sk_state == TCP_LISTEN) {
1731                 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1732
1733                 if (!nsk)
1734                         goto discard;
1735                 if (nsk != sk) {
1736                         if (tcp_child_process(sk, nsk, skb)) {
1737                                 rsk = nsk;
1738                                 goto reset;
1739                         }
1740                         return 0;
1741                 }
1742         } else
1743                 sock_rps_save_rxhash(sk, skb);
1744
1745         if (tcp_rcv_state_process(sk, skb)) {
1746                 rsk = sk;
1747                 goto reset;
1748         }
1749         return 0;
1750
1751 reset:
1752         tcp_v4_send_reset(rsk, skb);
1753 discard:
1754         kfree_skb_reason(skb, reason);
1755         /* Be careful here. If this function gets more complicated and
1756          * gcc suffers from register pressure on the x86, sk (in %ebx)
1757          * might be destroyed here. This current version compiles correctly,
1758          * but you have been warned.
1759          */
1760         return 0;
1761
1762 csum_err:
1763         reason = SKB_DROP_REASON_TCP_CSUM;
1764         trace_tcp_bad_csum(skb);
1765         TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1766         TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1767         goto discard;
1768 }
1769 EXPORT_SYMBOL(tcp_v4_do_rcv);
1770
1771 int tcp_v4_early_demux(struct sk_buff *skb)
1772 {
1773         struct net *net = dev_net(skb->dev);
1774         const struct iphdr *iph;
1775         const struct tcphdr *th;
1776         struct sock *sk;
1777
1778         if (skb->pkt_type != PACKET_HOST)
1779                 return 0;
1780
1781         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1782                 return 0;
1783
1784         iph = ip_hdr(skb);
1785         th = tcp_hdr(skb);
1786
1787         if (th->doff < sizeof(struct tcphdr) / 4)
1788                 return 0;
1789
1790         sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
1791                                        iph->saddr, th->source,
1792                                        iph->daddr, ntohs(th->dest),
1793                                        skb->skb_iif, inet_sdif(skb));
1794         if (sk) {
1795                 skb->sk = sk;
1796                 skb->destructor = sock_edemux;
1797                 if (sk_fullsock(sk)) {
1798                         struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
1799
1800                         if (dst)
1801                                 dst = dst_check(dst, 0);
1802                         if (dst &&
1803                             sk->sk_rx_dst_ifindex == skb->skb_iif)
1804                                 skb_dst_set_noref(skb, dst);
1805                 }
1806         }
1807         return 0;
1808 }
1809
1810 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
1811                      enum skb_drop_reason *reason)
1812 {
1813         u32 limit, tail_gso_size, tail_gso_segs;
1814         struct skb_shared_info *shinfo;
1815         const struct tcphdr *th;
1816         struct tcphdr *thtail;
1817         struct sk_buff *tail;
1818         unsigned int hdrlen;
1819         bool fragstolen;
1820         u32 gso_segs;
1821         u32 gso_size;
1822         int delta;
1823
1824         /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1825          * we can fix skb->truesize to its real value to avoid future drops.
1826          * This is valid because skb is not yet charged to the socket.
1827          * It has been noticed pure SACK packets were sometimes dropped
1828          * (if cooked by drivers without copybreak feature).
1829          */
1830         skb_condense(skb);
1831
1832         skb_dst_drop(skb);
1833
1834         if (unlikely(tcp_checksum_complete(skb))) {
1835                 bh_unlock_sock(sk);
1836                 trace_tcp_bad_csum(skb);
1837                 *reason = SKB_DROP_REASON_TCP_CSUM;
1838                 __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1839                 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1840                 return true;
1841         }
1842
1843         /* Attempt coalescing to last skb in backlog, even if we are
1844          * above the limits.
1845          * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
1846          */
1847         th = (const struct tcphdr *)skb->data;
1848         hdrlen = th->doff * 4;
1849
1850         tail = sk->sk_backlog.tail;
1851         if (!tail)
1852                 goto no_coalesce;
1853         thtail = (struct tcphdr *)tail->data;
1854
1855         if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
1856             TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
1857             ((TCP_SKB_CB(tail)->tcp_flags |
1858               TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
1859             !((TCP_SKB_CB(tail)->tcp_flags &
1860               TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
1861             ((TCP_SKB_CB(tail)->tcp_flags ^
1862               TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
1863 #ifdef CONFIG_TLS_DEVICE
1864             tail->decrypted != skb->decrypted ||
1865 #endif
1866             thtail->doff != th->doff ||
1867             memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
1868                 goto no_coalesce;
1869
1870         __skb_pull(skb, hdrlen);
1871
1872         shinfo = skb_shinfo(skb);
1873         gso_size = shinfo->gso_size ?: skb->len;
1874         gso_segs = shinfo->gso_segs ?: 1;
1875
1876         shinfo = skb_shinfo(tail);
1877         tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
1878         tail_gso_segs = shinfo->gso_segs ?: 1;
1879
1880         if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
1881                 TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
1882
1883                 if (likely(!before(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))) {
1884                         TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
1885                         thtail->window = th->window;
1886                 }
1887
1888                 /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
1889                  * thtail->fin, so that the fast path in tcp_rcv_established()
1890                  * is not entered if we append a packet with a FIN.
1891                  * SYN, RST, URG are not present.
1892                  * ACK is set on both packets.
1893                  * PSH : we do not really care in TCP stack,
1894                  *       at least for 'GRO' packets.
1895                  */
1896                 thtail->fin |= th->fin;
1897                 TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1898
1899                 if (TCP_SKB_CB(skb)->has_rxtstamp) {
1900                         TCP_SKB_CB(tail)->has_rxtstamp = true;
1901                         tail->tstamp = skb->tstamp;
1902                         skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
1903                 }
1904
1905                 /* Not as strict as GRO. We only need to carry mss max value */
1906                 shinfo->gso_size = max(gso_size, tail_gso_size);
1907                 shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
1908
1909                 sk->sk_backlog.len += delta;
1910                 __NET_INC_STATS(sock_net(sk),
1911                                 LINUX_MIB_TCPBACKLOGCOALESCE);
1912                 kfree_skb_partial(skb, fragstolen);
1913                 return false;
1914         }
1915         __skb_push(skb, hdrlen);
1916
1917 no_coalesce:
1918         limit = (u32)READ_ONCE(sk->sk_rcvbuf) + (u32)(READ_ONCE(sk->sk_sndbuf) >> 1);
1919
1920         /* Only socket owner can try to collapse/prune rx queues
1921          * to reduce memory overhead, so add a little headroom here.
1922          * Few sockets backlog are possibly concurrently non empty.
1923          */
1924         limit += 64 * 1024;
1925
1926         if (unlikely(sk_add_backlog(sk, skb, limit))) {
1927                 bh_unlock_sock(sk);
1928                 *reason = SKB_DROP_REASON_SOCKET_BACKLOG;
1929                 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1930                 return true;
1931         }
1932         return false;
1933 }
1934 EXPORT_SYMBOL(tcp_add_backlog);
1935
1936 int tcp_filter(struct sock *sk, struct sk_buff *skb)
1937 {
1938         struct tcphdr *th = (struct tcphdr *)skb->data;
1939
1940         return sk_filter_trim_cap(sk, skb, th->doff * 4);
1941 }
1942 EXPORT_SYMBOL(tcp_filter);
1943
1944 static void tcp_v4_restore_cb(struct sk_buff *skb)
1945 {
1946         memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1947                 sizeof(struct inet_skb_parm));
1948 }
1949
1950 static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1951                            const struct tcphdr *th)
1952 {
1953         /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
1954          * barrier() makes sure compiler wont play fool^Waliasing games.
1955          */
1956         memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1957                 sizeof(struct inet_skb_parm));
1958         barrier();
1959
1960         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1961         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1962                                     skb->len - th->doff * 4);
1963         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1964         TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1965         TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1966         TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1967         TCP_SKB_CB(skb)->sacked  = 0;
1968         TCP_SKB_CB(skb)->has_rxtstamp =
1969                         skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1970 }
1971
1972 /*
1973  *      From tcp_input.c
1974  */
1975
1976 int tcp_v4_rcv(struct sk_buff *skb)
1977 {
1978         struct net *net = dev_net(skb->dev);
1979         enum skb_drop_reason drop_reason;
1980         int sdif = inet_sdif(skb);
1981         int dif = inet_iif(skb);
1982         const struct iphdr *iph;
1983         const struct tcphdr *th;
1984         bool refcounted;
1985         struct sock *sk;
1986         int ret;
1987
1988         drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
1989         if (skb->pkt_type != PACKET_HOST)
1990                 goto discard_it;
1991
1992         /* Count it even if it's bad */
1993         __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1994
1995         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1996                 goto discard_it;
1997
1998         th = (const struct tcphdr *)skb->data;
1999
2000         if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) {
2001                 drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
2002                 goto bad_packet;
2003         }
2004         if (!pskb_may_pull(skb, th->doff * 4))
2005                 goto discard_it;
2006
2007         /* An explanation is required here, I think.
2008          * Packet length and doff are validated by header prediction,
2009          * provided case of th->doff==0 is eliminated.
2010          * So, we defer the checks. */
2011
2012         if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
2013                 goto csum_error;
2014
2015         th = (const struct tcphdr *)skb->data;
2016         iph = ip_hdr(skb);
2017 lookup:
2018         sk = __inet_lookup_skb(net->ipv4.tcp_death_row.hashinfo,
2019                                skb, __tcp_hdrlen(th), th->source,
2020                                th->dest, sdif, &refcounted);
2021         if (!sk)
2022                 goto no_tcp_socket;
2023
2024 process:
2025         if (sk->sk_state == TCP_TIME_WAIT)
2026                 goto do_time_wait;
2027
2028         if (sk->sk_state == TCP_NEW_SYN_RECV) {
2029                 struct request_sock *req = inet_reqsk(sk);
2030                 bool req_stolen = false;
2031                 struct sock *nsk;
2032
2033                 sk = req->rsk_listener;
2034                 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2035                         drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2036                 else
2037                         drop_reason = tcp_inbound_md5_hash(sk, skb,
2038                                                    &iph->saddr, &iph->daddr,
2039                                                    AF_INET, dif, sdif);
2040                 if (unlikely(drop_reason)) {
2041                         sk_drops_add(sk, skb);
2042                         reqsk_put(req);
2043                         goto discard_it;
2044                 }
2045                 if (tcp_checksum_complete(skb)) {
2046                         reqsk_put(req);
2047                         goto csum_error;
2048                 }
2049                 if (unlikely(sk->sk_state != TCP_LISTEN)) {
2050                         nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
2051                         if (!nsk) {
2052                                 inet_csk_reqsk_queue_drop_and_put(sk, req);
2053                                 goto lookup;
2054                         }
2055                         sk = nsk;
2056                         /* reuseport_migrate_sock() has already held one sk_refcnt
2057                          * before returning.
2058                          */
2059                 } else {
2060                         /* We own a reference on the listener, increase it again
2061                          * as we might lose it too soon.
2062                          */
2063                         sock_hold(sk);
2064                 }
2065                 refcounted = true;
2066                 nsk = NULL;
2067                 if (!tcp_filter(sk, skb)) {
2068                         th = (const struct tcphdr *)skb->data;
2069                         iph = ip_hdr(skb);
2070                         tcp_v4_fill_cb(skb, iph, th);
2071                         nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
2072                 } else {
2073                         drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
2074                 }
2075                 if (!nsk) {
2076                         reqsk_put(req);
2077                         if (req_stolen) {
2078                                 /* Another cpu got exclusive access to req
2079                                  * and created a full blown socket.
2080                                  * Try to feed this packet to this socket
2081                                  * instead of discarding it.
2082                                  */
2083                                 tcp_v4_restore_cb(skb);
2084                                 sock_put(sk);
2085                                 goto lookup;
2086                         }
2087                         goto discard_and_relse;
2088                 }
2089                 nf_reset_ct(skb);
2090                 if (nsk == sk) {
2091                         reqsk_put(req);
2092                         tcp_v4_restore_cb(skb);
2093                 } else if (tcp_child_process(sk, nsk, skb)) {
2094                         tcp_v4_send_reset(nsk, skb);
2095                         goto discard_and_relse;
2096                 } else {
2097                         sock_put(sk);
2098                         return 0;
2099                 }
2100         }
2101
2102         if (static_branch_unlikely(&ip4_min_ttl)) {
2103                 /* min_ttl can be changed concurrently from do_ip_setsockopt() */
2104                 if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) {
2105                         __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
2106                         drop_reason = SKB_DROP_REASON_TCP_MINTTL;
2107                         goto discard_and_relse;
2108                 }
2109         }
2110
2111         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
2112                 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2113                 goto discard_and_relse;
2114         }
2115
2116         drop_reason = tcp_inbound_md5_hash(sk, skb, &iph->saddr,
2117                                            &iph->daddr, AF_INET, dif, sdif);
2118         if (drop_reason)
2119                 goto discard_and_relse;
2120
2121         nf_reset_ct(skb);
2122
2123         if (tcp_filter(sk, skb)) {
2124                 drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
2125                 goto discard_and_relse;
2126         }
2127         th = (const struct tcphdr *)skb->data;
2128         iph = ip_hdr(skb);
2129         tcp_v4_fill_cb(skb, iph, th);
2130
2131         skb->dev = NULL;
2132
2133         if (sk->sk_state == TCP_LISTEN) {
2134                 ret = tcp_v4_do_rcv(sk, skb);
2135                 goto put_and_return;
2136         }
2137
2138         sk_incoming_cpu_update(sk);
2139
2140         bh_lock_sock_nested(sk);
2141         tcp_segs_in(tcp_sk(sk), skb);
2142         ret = 0;
2143         if (!sock_owned_by_user(sk)) {
2144                 ret = tcp_v4_do_rcv(sk, skb);
2145         } else {
2146                 if (tcp_add_backlog(sk, skb, &drop_reason))
2147                         goto discard_and_relse;
2148         }
2149         bh_unlock_sock(sk);
2150
2151 put_and_return:
2152         if (refcounted)
2153                 sock_put(sk);
2154
2155         return ret;
2156
2157 no_tcp_socket:
2158         drop_reason = SKB_DROP_REASON_NO_SOCKET;
2159         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2160                 goto discard_it;
2161
2162         tcp_v4_fill_cb(skb, iph, th);
2163
2164         if (tcp_checksum_complete(skb)) {
2165 csum_error:
2166                 drop_reason = SKB_DROP_REASON_TCP_CSUM;
2167                 trace_tcp_bad_csum(skb);
2168                 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
2169 bad_packet:
2170                 __TCP_INC_STATS(net, TCP_MIB_INERRS);
2171         } else {
2172                 tcp_v4_send_reset(NULL, skb);
2173         }
2174
2175 discard_it:
2176         SKB_DR_OR(drop_reason, NOT_SPECIFIED);
2177         /* Discard frame. */
2178         kfree_skb_reason(skb, drop_reason);
2179         return 0;
2180
2181 discard_and_relse:
2182         sk_drops_add(sk, skb);
2183         if (refcounted)
2184                 sock_put(sk);
2185         goto discard_it;
2186
2187 do_time_wait:
2188         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2189                 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
2190                 inet_twsk_put(inet_twsk(sk));
2191                 goto discard_it;
2192         }
2193
2194         tcp_v4_fill_cb(skb, iph, th);
2195
2196         if (tcp_checksum_complete(skb)) {
2197                 inet_twsk_put(inet_twsk(sk));
2198                 goto csum_error;
2199         }
2200         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2201         case TCP_TW_SYN: {
2202                 struct sock *sk2 = inet_lookup_listener(net,
2203                                                         net->ipv4.tcp_death_row.hashinfo,
2204                                                         skb, __tcp_hdrlen(th),
2205                                                         iph->saddr, th->source,
2206                                                         iph->daddr, th->dest,
2207                                                         inet_iif(skb),
2208                                                         sdif);
2209                 if (sk2) {
2210                         inet_twsk_deschedule_put(inet_twsk(sk));
2211                         sk = sk2;
2212                         tcp_v4_restore_cb(skb);
2213                         refcounted = false;
2214                         goto process;
2215                 }
2216         }
2217                 /* to ACK */
2218                 fallthrough;
2219         case TCP_TW_ACK:
2220                 tcp_v4_timewait_ack(sk, skb);
2221                 break;
2222         case TCP_TW_RST:
2223                 tcp_v4_send_reset(sk, skb);
2224                 inet_twsk_deschedule_put(inet_twsk(sk));
2225                 goto discard_it;
2226         case TCP_TW_SUCCESS:;
2227         }
2228         goto discard_it;
2229 }
2230
2231 static struct timewait_sock_ops tcp_timewait_sock_ops = {
2232         .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
2233         .twsk_unique    = tcp_twsk_unique,
2234         .twsk_destructor= tcp_twsk_destructor,
2235 };
2236
2237 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2238 {
2239         struct dst_entry *dst = skb_dst(skb);
2240
2241         if (dst && dst_hold_safe(dst)) {
2242                 rcu_assign_pointer(sk->sk_rx_dst, dst);
2243                 sk->sk_rx_dst_ifindex = skb->skb_iif;
2244         }
2245 }
2246 EXPORT_SYMBOL(inet_sk_rx_dst_set);
2247
2248 const struct inet_connection_sock_af_ops ipv4_specific = {
2249         .queue_xmit        = ip_queue_xmit,
2250         .send_check        = tcp_v4_send_check,
2251         .rebuild_header    = inet_sk_rebuild_header,
2252         .sk_rx_dst_set     = inet_sk_rx_dst_set,
2253         .conn_request      = tcp_v4_conn_request,
2254         .syn_recv_sock     = tcp_v4_syn_recv_sock,
2255         .net_header_len    = sizeof(struct iphdr),
2256         .setsockopt        = ip_setsockopt,
2257         .getsockopt        = ip_getsockopt,
2258         .addr2sockaddr     = inet_csk_addr2sockaddr,
2259         .sockaddr_len      = sizeof(struct sockaddr_in),
2260         .mtu_reduced       = tcp_v4_mtu_reduced,
2261 };
2262 EXPORT_SYMBOL(ipv4_specific);
2263
2264 #ifdef CONFIG_TCP_MD5SIG
2265 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2266         .md5_lookup             = tcp_v4_md5_lookup,
2267         .calc_md5_hash          = tcp_v4_md5_hash_skb,
2268         .md5_parse              = tcp_v4_parse_md5_keys,
2269 };
2270 #endif
2271
2272 /* NOTE: A lot of things set to zero explicitly by call to
2273  *       sk_alloc() so need not be done here.
2274  */
2275 static int tcp_v4_init_sock(struct sock *sk)
2276 {
2277         struct inet_connection_sock *icsk = inet_csk(sk);
2278
2279         tcp_init_sock(sk);
2280
2281         icsk->icsk_af_ops = &ipv4_specific;
2282
2283 #ifdef CONFIG_TCP_MD5SIG
2284         tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2285 #endif
2286
2287         return 0;
2288 }
2289
2290 void tcp_v4_destroy_sock(struct sock *sk)
2291 {
2292         struct tcp_sock *tp = tcp_sk(sk);
2293
2294         trace_tcp_destroy_sock(sk);
2295
2296         tcp_clear_xmit_timers(sk);
2297
2298         tcp_cleanup_congestion_control(sk);
2299
2300         tcp_cleanup_ulp(sk);
2301
2302         /* Cleanup up the write buffer. */
2303         tcp_write_queue_purge(sk);
2304
2305         /* Check if we want to disable active TFO */
2306         tcp_fastopen_active_disable_ofo_check(sk);
2307
2308         /* Cleans up our, hopefully empty, out_of_order_queue. */
2309         skb_rbtree_purge(&tp->out_of_order_queue);
2310
2311 #ifdef CONFIG_TCP_MD5SIG
2312         /* Clean up the MD5 key list, if any */
2313         if (tp->md5sig_info) {
2314                 tcp_clear_md5_list(sk);
2315                 kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
2316                 tp->md5sig_info = NULL;
2317                 static_branch_slow_dec_deferred(&tcp_md5_needed);
2318         }
2319 #endif
2320
2321         /* Clean up a referenced TCP bind bucket. */
2322         if (inet_csk(sk)->icsk_bind_hash)
2323                 inet_put_port(sk);
2324
2325         BUG_ON(rcu_access_pointer(tp->fastopen_rsk));
2326
2327         /* If socket is aborted during connect operation */
2328         tcp_free_fastopen_req(tp);
2329         tcp_fastopen_destroy_cipher(sk);
2330         tcp_saved_syn_free(tp);
2331
2332         sk_sockets_allocated_dec(sk);
2333 }
2334 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2335
2336 #ifdef CONFIG_PROC_FS
2337 /* Proc filesystem TCP sock list dumping. */
2338
2339 static unsigned short seq_file_family(const struct seq_file *seq);
2340
2341 static bool seq_sk_match(struct seq_file *seq, const struct sock *sk)
2342 {
2343         unsigned short family = seq_file_family(seq);
2344
2345         /* AF_UNSPEC is used as a match all */
2346         return ((family == AF_UNSPEC || family == sk->sk_family) &&
2347                 net_eq(sock_net(sk), seq_file_net(seq)));
2348 }
2349
2350 /* Find a non empty bucket (starting from st->bucket)
2351  * and return the first sk from it.
2352  */
2353 static void *listening_get_first(struct seq_file *seq)
2354 {
2355         struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2356         struct tcp_iter_state *st = seq->private;
2357
2358         st->offset = 0;
2359         for (; st->bucket <= hinfo->lhash2_mask; st->bucket++) {
2360                 struct inet_listen_hashbucket *ilb2;
2361                 struct hlist_nulls_node *node;
2362                 struct sock *sk;
2363
2364                 ilb2 = &hinfo->lhash2[st->bucket];
2365                 if (hlist_nulls_empty(&ilb2->nulls_head))
2366                         continue;
2367
2368                 spin_lock(&ilb2->lock);
2369                 sk_nulls_for_each(sk, node, &ilb2->nulls_head) {
2370                         if (seq_sk_match(seq, sk))
2371                                 return sk;
2372                 }
2373                 spin_unlock(&ilb2->lock);
2374         }
2375
2376         return NULL;
2377 }
2378
2379 /* Find the next sk of "cur" within the same bucket (i.e. st->bucket).
2380  * If "cur" is the last one in the st->bucket,
2381  * call listening_get_first() to return the first sk of the next
2382  * non empty bucket.
2383  */
2384 static void *listening_get_next(struct seq_file *seq, void *cur)
2385 {
2386         struct tcp_iter_state *st = seq->private;
2387         struct inet_listen_hashbucket *ilb2;
2388         struct hlist_nulls_node *node;
2389         struct inet_hashinfo *hinfo;
2390         struct sock *sk = cur;
2391
2392         ++st->num;
2393         ++st->offset;
2394
2395         sk = sk_nulls_next(sk);
2396         sk_nulls_for_each_from(sk, node) {
2397                 if (seq_sk_match(seq, sk))
2398                         return sk;
2399         }
2400
2401         hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2402         ilb2 = &hinfo->lhash2[st->bucket];
2403         spin_unlock(&ilb2->lock);
2404         ++st->bucket;
2405         return listening_get_first(seq);
2406 }
2407
2408 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2409 {
2410         struct tcp_iter_state *st = seq->private;
2411         void *rc;
2412
2413         st->bucket = 0;
2414         st->offset = 0;
2415         rc = listening_get_first(seq);
2416
2417         while (rc && *pos) {
2418                 rc = listening_get_next(seq, rc);
2419                 --*pos;
2420         }
2421         return rc;
2422 }
2423
2424 static inline bool empty_bucket(struct inet_hashinfo *hinfo,
2425                                 const struct tcp_iter_state *st)
2426 {
2427         return hlist_nulls_empty(&hinfo->ehash[st->bucket].chain);
2428 }
2429
2430 /*
2431  * Get first established socket starting from bucket given in st->bucket.
2432  * If st->bucket is zero, the very first socket in the hash is returned.
2433  */
2434 static void *established_get_first(struct seq_file *seq)
2435 {
2436         struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2437         struct tcp_iter_state *st = seq->private;
2438
2439         st->offset = 0;
2440         for (; st->bucket <= hinfo->ehash_mask; ++st->bucket) {
2441                 struct sock *sk;
2442                 struct hlist_nulls_node *node;
2443                 spinlock_t *lock = inet_ehash_lockp(hinfo, st->bucket);
2444
2445                 /* Lockless fast path for the common case of empty buckets */
2446                 if (empty_bucket(hinfo, st))
2447                         continue;
2448
2449                 spin_lock_bh(lock);
2450                 sk_nulls_for_each(sk, node, &hinfo->ehash[st->bucket].chain) {
2451                         if (seq_sk_match(seq, sk))
2452                                 return sk;
2453                 }
2454                 spin_unlock_bh(lock);
2455         }
2456
2457         return NULL;
2458 }
2459
2460 static void *established_get_next(struct seq_file *seq, void *cur)
2461 {
2462         struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2463         struct tcp_iter_state *st = seq->private;
2464         struct hlist_nulls_node *node;
2465         struct sock *sk = cur;
2466
2467         ++st->num;
2468         ++st->offset;
2469
2470         sk = sk_nulls_next(sk);
2471
2472         sk_nulls_for_each_from(sk, node) {
2473                 if (seq_sk_match(seq, sk))
2474                         return sk;
2475         }
2476
2477         spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2478         ++st->bucket;
2479         return established_get_first(seq);
2480 }
2481
2482 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2483 {
2484         struct tcp_iter_state *st = seq->private;
2485         void *rc;
2486
2487         st->bucket = 0;
2488         rc = established_get_first(seq);
2489
2490         while (rc && pos) {
2491                 rc = established_get_next(seq, rc);
2492                 --pos;
2493         }
2494         return rc;
2495 }
2496
2497 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2498 {
2499         void *rc;
2500         struct tcp_iter_state *st = seq->private;
2501
2502         st->state = TCP_SEQ_STATE_LISTENING;
2503         rc        = listening_get_idx(seq, &pos);
2504
2505         if (!rc) {
2506                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2507                 rc        = established_get_idx(seq, pos);
2508         }
2509
2510         return rc;
2511 }
2512
2513 static void *tcp_seek_last_pos(struct seq_file *seq)
2514 {
2515         struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2516         struct tcp_iter_state *st = seq->private;
2517         int bucket = st->bucket;
2518         int offset = st->offset;
2519         int orig_num = st->num;
2520         void *rc = NULL;
2521
2522         switch (st->state) {
2523         case TCP_SEQ_STATE_LISTENING:
2524                 if (st->bucket > hinfo->lhash2_mask)
2525                         break;
2526                 rc = listening_get_first(seq);
2527                 while (offset-- && rc && bucket == st->bucket)
2528                         rc = listening_get_next(seq, rc);
2529                 if (rc)
2530                         break;
2531                 st->bucket = 0;
2532                 st->state = TCP_SEQ_STATE_ESTABLISHED;
2533                 fallthrough;
2534         case TCP_SEQ_STATE_ESTABLISHED:
2535                 if (st->bucket > hinfo->ehash_mask)
2536                         break;
2537                 rc = established_get_first(seq);
2538                 while (offset-- && rc && bucket == st->bucket)
2539                         rc = established_get_next(seq, rc);
2540         }
2541
2542         st->num = orig_num;
2543
2544         return rc;
2545 }
2546
2547 void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2548 {
2549         struct tcp_iter_state *st = seq->private;
2550         void *rc;
2551
2552         if (*pos && *pos == st->last_pos) {
2553                 rc = tcp_seek_last_pos(seq);
2554                 if (rc)
2555                         goto out;
2556         }
2557
2558         st->state = TCP_SEQ_STATE_LISTENING;
2559         st->num = 0;
2560         st->bucket = 0;
2561         st->offset = 0;
2562         rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2563
2564 out:
2565         st->last_pos = *pos;
2566         return rc;
2567 }
2568 EXPORT_SYMBOL(tcp_seq_start);
2569
2570 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2571 {
2572         struct tcp_iter_state *st = seq->private;
2573         void *rc = NULL;
2574
2575         if (v == SEQ_START_TOKEN) {
2576                 rc = tcp_get_idx(seq, 0);
2577                 goto out;
2578         }
2579
2580         switch (st->state) {
2581         case TCP_SEQ_STATE_LISTENING:
2582                 rc = listening_get_next(seq, v);
2583                 if (!rc) {
2584                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2585                         st->bucket = 0;
2586                         st->offset = 0;
2587                         rc        = established_get_first(seq);
2588                 }
2589                 break;
2590         case TCP_SEQ_STATE_ESTABLISHED:
2591                 rc = established_get_next(seq, v);
2592                 break;
2593         }
2594 out:
2595         ++*pos;
2596         st->last_pos = *pos;
2597         return rc;
2598 }
2599 EXPORT_SYMBOL(tcp_seq_next);
2600
2601 void tcp_seq_stop(struct seq_file *seq, void *v)
2602 {
2603         struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2604         struct tcp_iter_state *st = seq->private;
2605
2606         switch (st->state) {
2607         case TCP_SEQ_STATE_LISTENING:
2608                 if (v != SEQ_START_TOKEN)
2609                         spin_unlock(&hinfo->lhash2[st->bucket].lock);
2610                 break;
2611         case TCP_SEQ_STATE_ESTABLISHED:
2612                 if (v)
2613                         spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2614                 break;
2615         }
2616 }
2617 EXPORT_SYMBOL(tcp_seq_stop);
2618
2619 static void get_openreq4(const struct request_sock *req,
2620                          struct seq_file *f, int i)
2621 {
2622         const struct inet_request_sock *ireq = inet_rsk(req);
2623         long delta = req->rsk_timer.expires - jiffies;
2624
2625         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2626                 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2627                 i,
2628                 ireq->ir_loc_addr,
2629                 ireq->ir_num,
2630                 ireq->ir_rmt_addr,
2631                 ntohs(ireq->ir_rmt_port),
2632                 TCP_SYN_RECV,
2633                 0, 0, /* could print option size, but that is af dependent. */
2634                 1,    /* timers active (only the expire timer) */
2635                 jiffies_delta_to_clock_t(delta),
2636                 req->num_timeout,
2637                 from_kuid_munged(seq_user_ns(f),
2638                                  sock_i_uid(req->rsk_listener)),
2639                 0,  /* non standard timer */
2640                 0, /* open_requests have no inode */
2641                 0,
2642                 req);
2643 }
2644
2645 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2646 {
2647         int timer_active;
2648         unsigned long timer_expires;
2649         const struct tcp_sock *tp = tcp_sk(sk);
2650         const struct inet_connection_sock *icsk = inet_csk(sk);
2651         const struct inet_sock *inet = inet_sk(sk);
2652         const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2653         __be32 dest = inet->inet_daddr;
2654         __be32 src = inet->inet_rcv_saddr;
2655         __u16 destp = ntohs(inet->inet_dport);
2656         __u16 srcp = ntohs(inet->inet_sport);
2657         int rx_queue;
2658         int state;
2659
2660         if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2661             icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
2662             icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2663                 timer_active    = 1;
2664                 timer_expires   = icsk->icsk_timeout;
2665         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2666                 timer_active    = 4;
2667                 timer_expires   = icsk->icsk_timeout;
2668         } else if (timer_pending(&sk->sk_timer)) {
2669                 timer_active    = 2;
2670                 timer_expires   = sk->sk_timer.expires;
2671         } else {
2672                 timer_active    = 0;
2673                 timer_expires = jiffies;
2674         }
2675
2676         state = inet_sk_state_load(sk);
2677         if (state == TCP_LISTEN)
2678                 rx_queue = READ_ONCE(sk->sk_ack_backlog);
2679         else
2680                 /* Because we don't lock the socket,
2681                  * we might find a transient negative value.
2682                  */
2683                 rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
2684                                       READ_ONCE(tp->copied_seq), 0);
2685
2686         seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2687                         "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2688                 i, src, srcp, dest, destp, state,
2689                 READ_ONCE(tp->write_seq) - tp->snd_una,
2690                 rx_queue,
2691                 timer_active,
2692                 jiffies_delta_to_clock_t(timer_expires - jiffies),
2693                 icsk->icsk_retransmits,
2694                 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2695                 icsk->icsk_probes_out,
2696                 sock_i_ino(sk),
2697                 refcount_read(&sk->sk_refcnt), sk,
2698                 jiffies_to_clock_t(icsk->icsk_rto),
2699                 jiffies_to_clock_t(icsk->icsk_ack.ato),
2700                 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
2701                 tcp_snd_cwnd(tp),
2702                 state == TCP_LISTEN ?
2703                     fastopenq->max_qlen :
2704                     (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2705 }
2706
2707 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2708                                struct seq_file *f, int i)
2709 {
2710         long delta = tw->tw_timer.expires - jiffies;
2711         __be32 dest, src;
2712         __u16 destp, srcp;
2713
2714         dest  = tw->tw_daddr;
2715         src   = tw->tw_rcv_saddr;
2716         destp = ntohs(tw->tw_dport);
2717         srcp  = ntohs(tw->tw_sport);
2718
2719         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2720                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2721                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2722                 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2723                 refcount_read(&tw->tw_refcnt), tw);
2724 }
2725
2726 #define TMPSZ 150
2727
2728 static int tcp4_seq_show(struct seq_file *seq, void *v)
2729 {
2730         struct tcp_iter_state *st;
2731         struct sock *sk = v;
2732
2733         seq_setwidth(seq, TMPSZ - 1);
2734         if (v == SEQ_START_TOKEN) {
2735                 seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2736                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2737                            "inode");
2738                 goto out;
2739         }
2740         st = seq->private;
2741
2742         if (sk->sk_state == TCP_TIME_WAIT)
2743                 get_timewait4_sock(v, seq, st->num);
2744         else if (sk->sk_state == TCP_NEW_SYN_RECV)
2745                 get_openreq4(v, seq, st->num);
2746         else
2747                 get_tcp4_sock(v, seq, st->num);
2748 out:
2749         seq_pad(seq, '\n');
2750         return 0;
2751 }
2752
2753 #ifdef CONFIG_BPF_SYSCALL
2754 struct bpf_tcp_iter_state {
2755         struct tcp_iter_state state;
2756         unsigned int cur_sk;
2757         unsigned int end_sk;
2758         unsigned int max_sk;
2759         struct sock **batch;
2760         bool st_bucket_done;
2761 };
2762
2763 struct bpf_iter__tcp {
2764         __bpf_md_ptr(struct bpf_iter_meta *, meta);
2765         __bpf_md_ptr(struct sock_common *, sk_common);
2766         uid_t uid __aligned(8);
2767 };
2768
2769 static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
2770                              struct sock_common *sk_common, uid_t uid)
2771 {
2772         struct bpf_iter__tcp ctx;
2773
2774         meta->seq_num--;  /* skip SEQ_START_TOKEN */
2775         ctx.meta = meta;
2776         ctx.sk_common = sk_common;
2777         ctx.uid = uid;
2778         return bpf_iter_run_prog(prog, &ctx);
2779 }
2780
2781 static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
2782 {
2783         while (iter->cur_sk < iter->end_sk)
2784                 sock_gen_put(iter->batch[iter->cur_sk++]);
2785 }
2786
2787 static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
2788                                       unsigned int new_batch_sz)
2789 {
2790         struct sock **new_batch;
2791
2792         new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
2793                              GFP_USER | __GFP_NOWARN);
2794         if (!new_batch)
2795                 return -ENOMEM;
2796
2797         bpf_iter_tcp_put_batch(iter);
2798         kvfree(iter->batch);
2799         iter->batch = new_batch;
2800         iter->max_sk = new_batch_sz;
2801
2802         return 0;
2803 }
2804
2805 static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
2806                                                  struct sock *start_sk)
2807 {
2808         struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2809         struct bpf_tcp_iter_state *iter = seq->private;
2810         struct tcp_iter_state *st = &iter->state;
2811         struct hlist_nulls_node *node;
2812         unsigned int expected = 1;
2813         struct sock *sk;
2814
2815         sock_hold(start_sk);
2816         iter->batch[iter->end_sk++] = start_sk;
2817
2818         sk = sk_nulls_next(start_sk);
2819         sk_nulls_for_each_from(sk, node) {
2820                 if (seq_sk_match(seq, sk)) {
2821                         if (iter->end_sk < iter->max_sk) {
2822                                 sock_hold(sk);
2823                                 iter->batch[iter->end_sk++] = sk;
2824                         }
2825                         expected++;
2826                 }
2827         }
2828         spin_unlock(&hinfo->lhash2[st->bucket].lock);
2829
2830         return expected;
2831 }
2832
2833 static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq,
2834                                                    struct sock *start_sk)
2835 {
2836         struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2837         struct bpf_tcp_iter_state *iter = seq->private;
2838         struct tcp_iter_state *st = &iter->state;
2839         struct hlist_nulls_node *node;
2840         unsigned int expected = 1;
2841         struct sock *sk;
2842
2843         sock_hold(start_sk);
2844         iter->batch[iter->end_sk++] = start_sk;
2845
2846         sk = sk_nulls_next(start_sk);
2847         sk_nulls_for_each_from(sk, node) {
2848                 if (seq_sk_match(seq, sk)) {
2849                         if (iter->end_sk < iter->max_sk) {
2850                                 sock_hold(sk);
2851                                 iter->batch[iter->end_sk++] = sk;
2852                         }
2853                         expected++;
2854                 }
2855         }
2856         spin_unlock_bh(inet_ehash_lockp(hinfo, st->bucket));
2857
2858         return expected;
2859 }
2860
2861 static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
2862 {
2863         struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2864         struct bpf_tcp_iter_state *iter = seq->private;
2865         struct tcp_iter_state *st = &iter->state;
2866         unsigned int expected;
2867         bool resized = false;
2868         struct sock *sk;
2869
2870         /* The st->bucket is done.  Directly advance to the next
2871          * bucket instead of having the tcp_seek_last_pos() to skip
2872          * one by one in the current bucket and eventually find out
2873          * it has to advance to the next bucket.
2874          */
2875         if (iter->st_bucket_done) {
2876                 st->offset = 0;
2877                 st->bucket++;
2878                 if (st->state == TCP_SEQ_STATE_LISTENING &&
2879                     st->bucket > hinfo->lhash2_mask) {
2880                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2881                         st->bucket = 0;
2882                 }
2883         }
2884
2885 again:
2886         /* Get a new batch */
2887         iter->cur_sk = 0;
2888         iter->end_sk = 0;
2889         iter->st_bucket_done = false;
2890
2891         sk = tcp_seek_last_pos(seq);
2892         if (!sk)
2893                 return NULL; /* Done */
2894
2895         if (st->state == TCP_SEQ_STATE_LISTENING)
2896                 expected = bpf_iter_tcp_listening_batch(seq, sk);
2897         else
2898                 expected = bpf_iter_tcp_established_batch(seq, sk);
2899
2900         if (iter->end_sk == expected) {
2901                 iter->st_bucket_done = true;
2902                 return sk;
2903         }
2904
2905         if (!resized && !bpf_iter_tcp_realloc_batch(iter, expected * 3 / 2)) {
2906                 resized = true;
2907                 goto again;
2908         }
2909
2910         return sk;
2911 }
2912
2913 static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos)
2914 {
2915         /* bpf iter does not support lseek, so it always
2916          * continue from where it was stop()-ped.
2917          */
2918         if (*pos)
2919                 return bpf_iter_tcp_batch(seq);
2920
2921         return SEQ_START_TOKEN;
2922 }
2923
2924 static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2925 {
2926         struct bpf_tcp_iter_state *iter = seq->private;
2927         struct tcp_iter_state *st = &iter->state;
2928         struct sock *sk;
2929
2930         /* Whenever seq_next() is called, the iter->cur_sk is
2931          * done with seq_show(), so advance to the next sk in
2932          * the batch.
2933          */
2934         if (iter->cur_sk < iter->end_sk) {
2935                 /* Keeping st->num consistent in tcp_iter_state.
2936                  * bpf_iter_tcp does not use st->num.
2937                  * meta.seq_num is used instead.
2938                  */
2939                 st->num++;
2940                 /* Move st->offset to the next sk in the bucket such that
2941                  * the future start() will resume at st->offset in
2942                  * st->bucket.  See tcp_seek_last_pos().
2943                  */
2944                 st->offset++;
2945                 sock_gen_put(iter->batch[iter->cur_sk++]);
2946         }
2947
2948         if (iter->cur_sk < iter->end_sk)
2949                 sk = iter->batch[iter->cur_sk];
2950         else
2951                 sk = bpf_iter_tcp_batch(seq);
2952
2953         ++*pos;
2954         /* Keeping st->last_pos consistent in tcp_iter_state.
2955          * bpf iter does not do lseek, so st->last_pos always equals to *pos.
2956          */
2957         st->last_pos = *pos;
2958         return sk;
2959 }
2960
2961 static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
2962 {
2963         struct bpf_iter_meta meta;
2964         struct bpf_prog *prog;
2965         struct sock *sk = v;
2966         bool slow;
2967         uid_t uid;
2968         int ret;
2969
2970         if (v == SEQ_START_TOKEN)
2971                 return 0;
2972
2973         if (sk_fullsock(sk))
2974                 slow = lock_sock_fast(sk);
2975
2976         if (unlikely(sk_unhashed(sk))) {
2977                 ret = SEQ_SKIP;
2978                 goto unlock;
2979         }
2980
2981         if (sk->sk_state == TCP_TIME_WAIT) {
2982                 uid = 0;
2983         } else if (sk->sk_state == TCP_NEW_SYN_RECV) {
2984                 const struct request_sock *req = v;
2985
2986                 uid = from_kuid_munged(seq_user_ns(seq),
2987                                        sock_i_uid(req->rsk_listener));
2988         } else {
2989                 uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
2990         }
2991
2992         meta.seq = seq;
2993         prog = bpf_iter_get_info(&meta, false);
2994         ret = tcp_prog_seq_show(prog, &meta, v, uid);
2995
2996 unlock:
2997         if (sk_fullsock(sk))
2998                 unlock_sock_fast(sk, slow);
2999         return ret;
3000
3001 }
3002
3003 static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
3004 {
3005         struct bpf_tcp_iter_state *iter = seq->private;
3006         struct bpf_iter_meta meta;
3007         struct bpf_prog *prog;
3008
3009         if (!v) {
3010                 meta.seq = seq;
3011                 prog = bpf_iter_get_info(&meta, true);
3012                 if (prog)
3013                         (void)tcp_prog_seq_show(prog, &meta, v, 0);
3014         }
3015
3016         if (iter->cur_sk < iter->end_sk) {
3017                 bpf_iter_tcp_put_batch(iter);
3018                 iter->st_bucket_done = false;
3019         }
3020 }
3021
3022 static const struct seq_operations bpf_iter_tcp_seq_ops = {
3023         .show           = bpf_iter_tcp_seq_show,
3024         .start          = bpf_iter_tcp_seq_start,
3025         .next           = bpf_iter_tcp_seq_next,
3026         .stop           = bpf_iter_tcp_seq_stop,
3027 };
3028 #endif
3029 static unsigned short seq_file_family(const struct seq_file *seq)
3030 {
3031         const struct tcp_seq_afinfo *afinfo;
3032
3033 #ifdef CONFIG_BPF_SYSCALL
3034         /* Iterated from bpf_iter.  Let the bpf prog to filter instead. */
3035         if (seq->op == &bpf_iter_tcp_seq_ops)
3036                 return AF_UNSPEC;
3037 #endif
3038
3039         /* Iterated from proc fs */
3040         afinfo = pde_data(file_inode(seq->file));
3041         return afinfo->family;
3042 }
3043
3044 static const struct seq_operations tcp4_seq_ops = {
3045         .show           = tcp4_seq_show,
3046         .start          = tcp_seq_start,
3047         .next           = tcp_seq_next,
3048         .stop           = tcp_seq_stop,
3049 };
3050
3051 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
3052         .family         = AF_INET,
3053 };
3054
3055 static int __net_init tcp4_proc_init_net(struct net *net)
3056 {
3057         if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
3058                         sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
3059                 return -ENOMEM;
3060         return 0;
3061 }
3062
3063 static void __net_exit tcp4_proc_exit_net(struct net *net)
3064 {
3065         remove_proc_entry("tcp", net->proc_net);
3066 }
3067
3068 static struct pernet_operations tcp4_net_ops = {
3069         .init = tcp4_proc_init_net,
3070         .exit = tcp4_proc_exit_net,
3071 };
3072
3073 int __init tcp4_proc_init(void)
3074 {
3075         return register_pernet_subsys(&tcp4_net_ops);
3076 }
3077
3078 void tcp4_proc_exit(void)
3079 {
3080         unregister_pernet_subsys(&tcp4_net_ops);
3081 }
3082 #endif /* CONFIG_PROC_FS */
3083
3084 /* @wake is one when sk_stream_write_space() calls us.
3085  * This sends EPOLLOUT only if notsent_bytes is half the limit.
3086  * This mimics the strategy used in sock_def_write_space().
3087  */
3088 bool tcp_stream_memory_free(const struct sock *sk, int wake)
3089 {
3090         const struct tcp_sock *tp = tcp_sk(sk);
3091         u32 notsent_bytes = READ_ONCE(tp->write_seq) -
3092                             READ_ONCE(tp->snd_nxt);
3093
3094         return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
3095 }
3096 EXPORT_SYMBOL(tcp_stream_memory_free);
3097
3098 struct proto tcp_prot = {
3099         .name                   = "TCP",
3100         .owner                  = THIS_MODULE,
3101         .close                  = tcp_close,
3102         .pre_connect            = tcp_v4_pre_connect,
3103         .connect                = tcp_v4_connect,
3104         .disconnect             = tcp_disconnect,
3105         .accept                 = inet_csk_accept,
3106         .ioctl                  = tcp_ioctl,
3107         .init                   = tcp_v4_init_sock,
3108         .destroy                = tcp_v4_destroy_sock,
3109         .shutdown               = tcp_shutdown,
3110         .setsockopt             = tcp_setsockopt,
3111         .getsockopt             = tcp_getsockopt,
3112         .bpf_bypass_getsockopt  = tcp_bpf_bypass_getsockopt,
3113         .keepalive              = tcp_set_keepalive,
3114         .recvmsg                = tcp_recvmsg,
3115         .sendmsg                = tcp_sendmsg,
3116         .sendpage               = tcp_sendpage,
3117         .backlog_rcv            = tcp_v4_do_rcv,
3118         .release_cb             = tcp_release_cb,
3119         .hash                   = inet_hash,
3120         .unhash                 = inet_unhash,
3121         .get_port               = inet_csk_get_port,
3122         .put_port               = inet_put_port,
3123 #ifdef CONFIG_BPF_SYSCALL
3124         .psock_update_sk_prot   = tcp_bpf_update_proto,
3125 #endif
3126         .enter_memory_pressure  = tcp_enter_memory_pressure,
3127         .leave_memory_pressure  = tcp_leave_memory_pressure,
3128         .stream_memory_free     = tcp_stream_memory_free,
3129         .sockets_allocated      = &tcp_sockets_allocated,
3130         .orphan_count           = &tcp_orphan_count,
3131
3132         .memory_allocated       = &tcp_memory_allocated,
3133         .per_cpu_fw_alloc       = &tcp_memory_per_cpu_fw_alloc,
3134
3135         .memory_pressure        = &tcp_memory_pressure,
3136         .sysctl_mem             = sysctl_tcp_mem,
3137         .sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_tcp_wmem),
3138         .sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_tcp_rmem),
3139         .max_header             = MAX_TCP_HEADER,
3140         .obj_size               = sizeof(struct tcp_sock),
3141         .slab_flags             = SLAB_TYPESAFE_BY_RCU,
3142         .twsk_prot              = &tcp_timewait_sock_ops,
3143         .rsk_prot               = &tcp_request_sock_ops,
3144         .h.hashinfo             = NULL,
3145         .no_autobind            = true,
3146         .diag_destroy           = tcp_abort,
3147 };
3148 EXPORT_SYMBOL(tcp_prot);
3149
3150 static void __net_exit tcp_sk_exit(struct net *net)
3151 {
3152         if (net->ipv4.tcp_congestion_control)
3153                 bpf_module_put(net->ipv4.tcp_congestion_control,
3154                                net->ipv4.tcp_congestion_control->owner);
3155 }
3156
3157 static void __net_init tcp_set_hashinfo(struct net *net)
3158 {
3159         struct inet_hashinfo *hinfo;
3160         unsigned int ehash_entries;
3161         struct net *old_net;
3162
3163         if (net_eq(net, &init_net))
3164                 goto fallback;
3165
3166         old_net = current->nsproxy->net_ns;
3167         ehash_entries = READ_ONCE(old_net->ipv4.sysctl_tcp_child_ehash_entries);
3168         if (!ehash_entries)
3169                 goto fallback;
3170
3171         ehash_entries = roundup_pow_of_two(ehash_entries);
3172         hinfo = inet_pernet_hashinfo_alloc(&tcp_hashinfo, ehash_entries);
3173         if (!hinfo) {
3174                 pr_warn("Failed to allocate TCP ehash (entries: %u) "
3175                         "for a netns, fallback to the global one\n",
3176                         ehash_entries);
3177 fallback:
3178                 hinfo = &tcp_hashinfo;
3179                 ehash_entries = tcp_hashinfo.ehash_mask + 1;
3180         }
3181
3182         net->ipv4.tcp_death_row.hashinfo = hinfo;
3183         net->ipv4.tcp_death_row.sysctl_max_tw_buckets = ehash_entries / 2;
3184         net->ipv4.sysctl_max_syn_backlog = max(128U, ehash_entries / 128);
3185 }
3186
3187 static int __net_init tcp_sk_init(struct net *net)
3188 {
3189         net->ipv4.sysctl_tcp_ecn = 2;
3190         net->ipv4.sysctl_tcp_ecn_fallback = 1;
3191
3192         net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
3193         net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
3194         net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
3195         net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
3196         net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;
3197
3198         net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
3199         net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
3200         net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
3201
3202         net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
3203         net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
3204         net->ipv4.sysctl_tcp_syncookies = 1;
3205         net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
3206         net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
3207         net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
3208         net->ipv4.sysctl_tcp_orphan_retries = 0;
3209         net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
3210         net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
3211         net->ipv4.sysctl_tcp_tw_reuse = 2;
3212         net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;
3213
3214         refcount_set(&net->ipv4.tcp_death_row.tw_refcount, 1);
3215         tcp_set_hashinfo(net);
3216
3217         net->ipv4.sysctl_tcp_sack = 1;
3218         net->ipv4.sysctl_tcp_window_scaling = 1;
3219         net->ipv4.sysctl_tcp_timestamps = 1;
3220         net->ipv4.sysctl_tcp_early_retrans = 3;
3221         net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
3222         net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior.  */
3223         net->ipv4.sysctl_tcp_retrans_collapse = 1;
3224         net->ipv4.sysctl_tcp_max_reordering = 300;
3225         net->ipv4.sysctl_tcp_dsack = 1;
3226         net->ipv4.sysctl_tcp_app_win = 31;
3227         net->ipv4.sysctl_tcp_adv_win_scale = 1;
3228         net->ipv4.sysctl_tcp_frto = 2;
3229         net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
3230         /* This limits the percentage of the congestion window which we
3231          * will allow a single TSO frame to consume.  Building TSO frames
3232          * which are too large can cause TCP streams to be bursty.
3233          */
3234         net->ipv4.sysctl_tcp_tso_win_divisor = 3;
3235         /* Default TSQ limit of 16 TSO segments */
3236         net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
3237
3238         /* rfc5961 challenge ack rate limiting, per net-ns, disabled by default. */
3239         net->ipv4.sysctl_tcp_challenge_ack_limit = INT_MAX;
3240
3241         net->ipv4.sysctl_tcp_min_tso_segs = 2;
3242         net->ipv4.sysctl_tcp_tso_rtt_log = 9;  /* 2^9 = 512 usec */
3243         net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
3244         net->ipv4.sysctl_tcp_autocorking = 1;
3245         net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
3246         net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
3247         net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
3248         if (net != &init_net) {
3249                 memcpy(net->ipv4.sysctl_tcp_rmem,
3250                        init_net.ipv4.sysctl_tcp_rmem,
3251                        sizeof(init_net.ipv4.sysctl_tcp_rmem));
3252                 memcpy(net->ipv4.sysctl_tcp_wmem,
3253                        init_net.ipv4.sysctl_tcp_wmem,
3254                        sizeof(init_net.ipv4.sysctl_tcp_wmem));
3255         }
3256         net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
3257         net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
3258         net->ipv4.sysctl_tcp_comp_sack_nr = 44;
3259         net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
3260         net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0;
3261         atomic_set(&net->ipv4.tfo_active_disable_times, 0);
3262
3263         /* Set default values for PLB */
3264         net->ipv4.sysctl_tcp_plb_enabled = 0; /* Disabled by default */
3265         net->ipv4.sysctl_tcp_plb_idle_rehash_rounds = 3;
3266         net->ipv4.sysctl_tcp_plb_rehash_rounds = 12;
3267         net->ipv4.sysctl_tcp_plb_suspend_rto_sec = 60;
3268         /* Default congestion threshold for PLB to mark a round is 50% */
3269         net->ipv4.sysctl_tcp_plb_cong_thresh = (1 << TCP_PLB_SCALE) / 2;
3270
3271         /* Reno is always built in */
3272         if (!net_eq(net, &init_net) &&
3273             bpf_try_module_get(init_net.ipv4.tcp_congestion_control,
3274                                init_net.ipv4.tcp_congestion_control->owner))
3275                 net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
3276         else
3277                 net->ipv4.tcp_congestion_control = &tcp_reno;
3278
3279         return 0;
3280 }
3281
3282 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
3283 {
3284         struct net *net;
3285
3286         tcp_twsk_purge(net_exit_list, AF_INET);
3287
3288         list_for_each_entry(net, net_exit_list, exit_list) {
3289                 inet_pernet_hashinfo_free(net->ipv4.tcp_death_row.hashinfo);
3290                 WARN_ON_ONCE(!refcount_dec_and_test(&net->ipv4.tcp_death_row.tw_refcount));
3291                 tcp_fastopen_ctx_destroy(net);
3292         }
3293 }
3294
3295 static struct pernet_operations __net_initdata tcp_sk_ops = {
3296        .init       = tcp_sk_init,
3297        .exit       = tcp_sk_exit,
3298        .exit_batch = tcp_sk_exit_batch,
3299 };
3300
3301 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3302 DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta,
3303                      struct sock_common *sk_common, uid_t uid)
3304
3305 #define INIT_BATCH_SZ 16
3306
3307 static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux)
3308 {
3309         struct bpf_tcp_iter_state *iter = priv_data;
3310         int err;
3311
3312         err = bpf_iter_init_seq_net(priv_data, aux);
3313         if (err)
3314                 return err;
3315
3316         err = bpf_iter_tcp_realloc_batch(iter, INIT_BATCH_SZ);
3317         if (err) {
3318                 bpf_iter_fini_seq_net(priv_data);
3319                 return err;
3320         }
3321
3322         return 0;
3323 }
3324
3325 static void bpf_iter_fini_tcp(void *priv_data)
3326 {
3327         struct bpf_tcp_iter_state *iter = priv_data;
3328
3329         bpf_iter_fini_seq_net(priv_data);
3330         kvfree(iter->batch);
3331 }
3332
3333 static const struct bpf_iter_seq_info tcp_seq_info = {
3334         .seq_ops                = &bpf_iter_tcp_seq_ops,
3335         .init_seq_private       = bpf_iter_init_tcp,
3336         .fini_seq_private       = bpf_iter_fini_tcp,
3337         .seq_priv_size          = sizeof(struct bpf_tcp_iter_state),
3338 };
3339
3340 static const struct bpf_func_proto *
3341 bpf_iter_tcp_get_func_proto(enum bpf_func_id func_id,
3342                             const struct bpf_prog *prog)
3343 {
3344         switch (func_id) {
3345         case BPF_FUNC_setsockopt:
3346                 return &bpf_sk_setsockopt_proto;
3347         case BPF_FUNC_getsockopt:
3348                 return &bpf_sk_getsockopt_proto;
3349         default:
3350                 return NULL;
3351         }
3352 }
3353
3354 static struct bpf_iter_reg tcp_reg_info = {
3355         .target                 = "tcp",
3356         .ctx_arg_info_size      = 1,
3357         .ctx_arg_info           = {
3358                 { offsetof(struct bpf_iter__tcp, sk_common),
3359                   PTR_TO_BTF_ID_OR_NULL },
3360         },
3361         .get_func_proto         = bpf_iter_tcp_get_func_proto,
3362         .seq_info               = &tcp_seq_info,
3363 };
3364
3365 static void __init bpf_iter_register(void)
3366 {
3367         tcp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON];
3368         if (bpf_iter_reg_target(&tcp_reg_info))
3369                 pr_warn("Warning: could not register bpf iterator tcp\n");
3370 }
3371
3372 #endif
3373
3374 void __init tcp_v4_init(void)
3375 {
3376         int cpu, res;
3377
3378         for_each_possible_cpu(cpu) {
3379                 struct sock *sk;
3380
3381                 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
3382                                            IPPROTO_TCP, &init_net);
3383                 if (res)
3384                         panic("Failed to create the TCP control socket.\n");
3385                 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
3386
3387                 /* Please enforce IP_DF and IPID==0 for RST and
3388                  * ACK sent in SYN-RECV and TIME-WAIT state.
3389                  */
3390                 inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
3391
3392                 per_cpu(ipv4_tcp_sk, cpu) = sk;
3393         }
3394         if (register_pernet_subsys(&tcp_sk_ops))
3395                 panic("Failed to create the TCP control socket.\n");
3396
3397 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
3398         bpf_iter_register();
3399 #endif
3400 }
This page took 0.236776 seconds and 4 git commands to generate.