Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
author    Jakub Kicinski <[email protected]>
          Sat, 18 Jun 2022 02:35:17 +0000 (19:35 -0700)
committer Jakub Kicinski <[email protected]>
          Sat, 18 Jun 2022 02:35:19 +0000 (19:35 -0700)
Daniel Borkmann says:

====================
pull-request: bpf-next 2022-06-17

We've added 72 non-merge commits during the last 15 days, which contain
a total of 92 files changed, 4582 insertions(+), and 834 deletions(-).

The main changes are:

1) Add 64-bit enum value support to BTF (encoding sketch after this list),
   from Yonghong Song.

2) Implement support for sleepable BPF uprobe programs (example after this
   list), from Delyan Kratunov.

3) Add new BPF helpers to issue and check TCP SYN cookies without binding to a
   socket, which is especially useful in synproxy scenarios (XDP sketch after
   this list), from Maxim Mikityanskiy.

4) Fix libbpf's internal USDT address translation logic for shared libraries as
   well as uprobe's symbol file offset calculation, from Andrii Nakryiko.

5) Extend libbpf to provide an API for textual representation of the various
   map/prog/attach/link types and use it in bpftool (usage sketch after this
   list), from Daniel Müller.

6) Provide BTF line info for RV64 and RV32 JITs, and fix a put_user() bug in
   the BPF core seen on 32-bit when storing BPF function addresses, from
   Pu Lehui.

7) Fix libbpf's BTF pointer size guessing by adding a list of various aliases
   for 'long' types (sketch after this list), from Douglas Raillard.

8) Fix bpftool to re-add setting the memlock rlimit, since probing for
   memcg-based accounting has been unreliable and caused a regression on COS
   (sketch after this list), from Quentin Monnet.

9) Fix UAF in BPF cgroup's effective program computation triggered upon BPF link
   detachment, from Tadeusz Struk.

10) Fix bpftool build bootstrapping during cross-compilation, which was
    pointing to the wrong AR binary, from Shahab Vahedi.

11) Fix a logic bug in libbpf's is_pow_of_2 implementation (canonical form
    shown after this list), from Yuze Chi.

12) BPF hash map optimization to avoid grabbing spinlocks of all CPUs when
    there is no free element; also add a benchmark as a reproducer, from
    Feng Zhou.

13) Fix bpftool's codegen to bail out when there's no BTF, from Michael Mullin.

14) Various minor cleanup and improvements all over the place.
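
For item 1, a rough illustration of the new encoding. The struct layout and
the value reassembly mirror btf_enum64_value() as used in the btf.c diff
below; the example enum itself is made up, and enumerators wider than 'int'
rely on a compiler extension or C23:

    #include <linux/types.h>

    /* Made-up example: an enum whose values need more than 32 bits. */
    enum big_flags {
            FLAG_LOW  = 0x1,
            FLAG_HIGH = 0x100000000ULL,     /* does not fit in 32 bits */
    };

    /* BTF_KIND_ENUM64 stores each value split into two 32-bit halves. */
    struct btf_enum64 {
            __u32 name_off;
            __u32 val_lo32;
            __u32 val_hi32;
    };

    /* Reassembling the 64-bit value, as btf_enum64_value() does below. */
    static inline __u64 btf_enum64_value(const struct btf_enum64 *e)
    {
            return ((__u64)e->val_hi32 << 32) | e->val_lo32;
    }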
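
For item 2, a minimal sketch of a sleepable uprobe program, assuming
libbpf's new "uprobe.s" section prefix marks the program sleepable; the
attach target (libc.so.6:fopen) is hypothetical:

    /* sleepable_uprobe.bpf.c - illustrative only; build with clang
     * -target bpf plus a -D__TARGET_ARCH_* define for PT_REGS_PARM1. */
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char path[256];

    SEC("uprobe.s/libc.so.6:fopen")     /* ".s" suffix = sleepable */
    int handle_fopen(struct pt_regs *ctx)
    {
            const char *upath = (const char *)PT_REGS_PARM1(ctx);

            /* bpf_copy_from_user() may fault in user pages and sleep,
             * which is only legal in a sleepable program. */
            bpf_copy_from_user(path, sizeof(path), upath);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";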
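
For item 3, a rough sketch of the intended XDP flow for IPv4. Helper names
and signatures are taken from this series (see the commit list below); a
real synproxy setup would also parse TCP options and craft the SYNACK
reply, both elided here:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    #define ETH_P_IP    0x0800
    #define IPPROTO_TCP 6

    SEC("xdp")
    int syncookie_xdp(struct xdp_md *ctx)
    {
            void *data = (void *)(long)ctx->data;
            void *data_end = (void *)(long)ctx->data_end;
            struct ethhdr *eth = data;
            struct iphdr *iph;
            struct tcphdr *th;
            __s64 cookie;

            if ((void *)(eth + 1) > data_end ||
                eth->h_proto != bpf_htons(ETH_P_IP))
                    return XDP_PASS;
            iph = (void *)(eth + 1);
            if ((void *)(iph + 1) > data_end || iph->protocol != IPPROTO_TCP)
                    return XDP_PASS;
            th = (void *)(iph + 1);             /* assumes no IP options */
            if ((void *)(th + 1) > data_end)
                    return XDP_PASS;

            if (th->syn && !th->ack) {
                    /* No listening socket needed: derive the cookie
                     * straight from the packet headers. */
                    cookie = bpf_tcp_raw_gen_syncookie_ipv4(iph, th,
                                                            sizeof(*th));
                    if (cookie < 0)
                            return XDP_DROP;
                    /* ...reflect a SYNACK carrying the cookie (elided)... */
            } else if (th->ack && !th->syn) {
                    /* Validate the cookie echoed back in the ACK. */
                    if (bpf_tcp_raw_check_syncookie_ipv4(iph, th))
                            return XDP_DROP;
            }
            return XDP_PASS;
    }

    char LICENSE[] SEC("license") = "GPL";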
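
For item 5, a small usage sketch; libbpf_bpf_prog_type_str() has siblings
for the map, attach and link types:

    #include <stdio.h>
    #include <bpf/libbpf.h>

    static void print_prog_kind(const struct bpf_program *prog)
    {
            /* Returns a stable string such as "kprobe" or "xdp",
             * replacing hand-rolled name tables in bpftool. */
            const char *s =
                    libbpf_bpf_prog_type_str(bpf_program__type(prog));

            printf("program type: %s\n", s ? s : "unknown");
    }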
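
For item 7, the gist of the fix: libbpf infers the target pointer size
from the size of the BTF integer type named 'long', but compilers emit
several spellings of that name. A sketch of such an alias list
(illustrative, not the exact set libbpf ships):

    /* Names that should all be treated as 'long' when guessing the
     * pointer size from BTF. */
    static const char * const long_aliases[] = {
            "long",
            "long int",
            "int long",
            "unsigned long",
            "long unsigned int",
            "unsigned long int",
    };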
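
For item 8, the restored behaviour amounts to the classic pre-memcg
pattern of lifting the memlock limit before loading BPF objects (a
sketch, not bpftool's exact code):

    #include <sys/resource.h>

    static void bump_memlock_rlimit(void)
    {
            struct rlimit rlim = {
                    .rlim_cur = RLIM_INFINITY,
                    .rlim_max = RLIM_INFINITY,
            };

            /* Without this, object loading on pre-memcg kernels is
             * capped by the default RLIMIT_MEMLOCK (often 64 KiB). */
            setrlimit(RLIMIT_MEMLOCK, &rlim);
    }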
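
For item 11, the canonical form of the check, for reference; zero must be
rejected explicitly, since 0 & (0 - 1) is also zero:

    #include <stdbool.h>
    #include <stddef.h>

    static bool is_pow_of_2(size_t x)
    {
            /* A power of two has exactly one bit set, and
             * x & (x - 1) clears the lowest set bit. */
            return x && (x & (x - 1)) == 0;
    }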

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (72 commits)
  bpf: Fix bpf_skc_lookup comment wrt. return type
  bpf: Fix non-static bpf_func_proto struct definitions
  selftests/bpf: Don't force lld on non-x86 architectures
  selftests/bpf: Add selftests for raw syncookie helpers in TC mode
  bpf: Allow the new syncookie helpers to work with SKBs
  selftests/bpf: Add selftests for raw syncookie helpers
  bpf: Add helpers to issue and check SYN cookies in XDP
  bpf: Allow helpers to accept pointers with a fixed size
  bpf: Fix documentation of th_len in bpf_tcp_{gen,check}_syncookie
  selftests/bpf: add tests for sleepable (uk)probes
  libbpf: add support for sleepable uprobe programs
  bpf: allow sleepable uprobe programs to attach
  bpf: implement sleepable uprobes by chaining gps
  bpf: move bpf_prog to bpf.h
  libbpf: Fix internal USDT address translation logic for shared libraries
  samples/bpf: Check detach prog exist or not in xdp_fwd
  selftests/bpf: Avoid skipping certain subtests
  selftests/bpf: Fix test_varlen verification failure with latest llvm
  bpftool: Do not check return value from libbpf_set_strict_mode()
  Revert "bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK"
  ...
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
include/net/tcp.h
kernel/bpf/btf.c
kernel/events/core.c
kernel/trace/bpf_trace.c
net/ipv4/tcp_input.c

diff --combined include/net/tcp.h
index 4794cae4577e4c64ce2664ed734ae90bbc531782,9a1efe23fab72ac67d6db826a61f69e5f391f917..c21a9b516f1ed29ff3bf3c6c7d792a04f04f4660
@@@ -253,8 -253,6 +253,8 @@@ extern long sysctl_tcp_mem[3]
  #define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */
  
  extern atomic_long_t tcp_memory_allocated;
 +DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
 +
  extern struct percpu_counter tcp_sockets_allocated;
  extern unsigned long tcp_memory_pressure;
  
@@@ -434,6 -432,7 +434,7 @@@ u16 tcp_v4_get_syncookie(struct sock *s
                         struct tcphdr *th, u32 *cookie);
  u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
                         struct tcphdr *th, u32 *cookie);
+ u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
  u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
                          const struct tcp_request_sock_ops *af_ops,
                          struct sock *sk, struct tcphdr *th);
diff --combined kernel/bpf/btf.c
index 63d0ac7dfe2fbf991c4badafc2e4460a0ed04c8a,6c0d8480e15c8a0bd039a85b403e9aac58b43109..f08037c31dd7837935584ce8efe9707d7888f534
@@@ -309,6 -309,7 +309,7 @@@ static const char * const btf_kind_str[
        [BTF_KIND_FLOAT]        = "FLOAT",
        [BTF_KIND_DECL_TAG]     = "DECL_TAG",
        [BTF_KIND_TYPE_TAG]     = "TYPE_TAG",
+       [BTF_KIND_ENUM64]       = "ENUM64",
  };
  
  const char *btf_type_str(const struct btf_type *t)
@@@ -666,6 -667,7 +667,7 @@@ static bool btf_type_has_size(const str
        case BTF_KIND_ENUM:
        case BTF_KIND_DATASEC:
        case BTF_KIND_FLOAT:
+       case BTF_KIND_ENUM64:
                return true;
        }
  
@@@ -711,6 -713,11 +713,11 @@@ static const struct btf_decl_tag *btf_t
        return (const struct btf_decl_tag *)(t + 1);
  }
  
+ static const struct btf_enum64 *btf_type_enum64(const struct btf_type *t)
+ {
+       return (const struct btf_enum64 *)(t + 1);
+ }
+ 
  static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
  {
        return kind_ops[BTF_INFO_KIND(t->info)];
@@@ -1019,6 -1026,7 +1026,7 @@@ static const char *btf_show_name(struc
                        parens = "{";
                break;
        case BTF_KIND_ENUM:
+       case BTF_KIND_ENUM64:
                prefix = "enum";
                break;
        default:
@@@ -1834,6 -1842,7 +1842,7 @@@ __btf_resolve_size(const struct btf *bt
                case BTF_KIND_UNION:
                case BTF_KIND_ENUM:
                case BTF_KIND_FLOAT:
+               case BTF_KIND_ENUM64:
                        size = type->size;
                        goto resolved;
  
@@@ -3670,6 -3679,7 +3679,7 @@@ static s32 btf_enum_check_meta(struct b
  {
        const struct btf_enum *enums = btf_type_enum(t);
        struct btf *btf = env->btf;
+       const char *fmt_str;
        u16 i, nr_enums;
        u32 meta_needed;
  
                return -EINVAL;
        }
  
-       if (btf_type_kflag(t)) {
-               btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
-               return -EINVAL;
-       }
        if (t->size > 8 || !is_power_of_2(t->size)) {
                btf_verifier_log_type(env, t, "Unexpected size");
                return -EINVAL;
  
                if (env->log.level == BPF_LOG_KERNEL)
                        continue;
-               btf_verifier_log(env, "\t%s val=%d\n",
+               fmt_str = btf_type_kflag(t) ? "\t%s val=%d\n" : "\t%s val=%u\n";
+               btf_verifier_log(env, fmt_str,
                                 __btf_name_by_offset(btf, enums[i].name_off),
                                 enums[i].val);
        }
@@@ -3759,7 -3765,10 +3765,10 @@@ static void btf_enum_show(const struct 
                return;
        }
  
-       btf_show_type_value(show, "%d", v);
+       if (btf_type_kflag(t))
+               btf_show_type_value(show, "%d", v);
+       else
+               btf_show_type_value(show, "%u", v);
        btf_show_end_type(show);
  }
  
@@@ -3772,6 -3781,109 +3781,109 @@@ static struct btf_kind_operations enum_
        .show = btf_enum_show,
  };
  
+ static s32 btf_enum64_check_meta(struct btf_verifier_env *env,
+                                const struct btf_type *t,
+                                u32 meta_left)
+ {
+       const struct btf_enum64 *enums = btf_type_enum64(t);
+       struct btf *btf = env->btf;
+       const char *fmt_str;
+       u16 i, nr_enums;
+       u32 meta_needed;
+       nr_enums = btf_type_vlen(t);
+       meta_needed = nr_enums * sizeof(*enums);
+       if (meta_left < meta_needed) {
+               btf_verifier_log_basic(env, t,
+                                      "meta_left:%u meta_needed:%u",
+                                      meta_left, meta_needed);
+               return -EINVAL;
+       }
+       if (t->size > 8 || !is_power_of_2(t->size)) {
+               btf_verifier_log_type(env, t, "Unexpected size");
+               return -EINVAL;
+       }
+       /* enum type either no name or a valid one */
+       if (t->name_off &&
+           !btf_name_valid_identifier(env->btf, t->name_off)) {
+               btf_verifier_log_type(env, t, "Invalid name");
+               return -EINVAL;
+       }
+       btf_verifier_log_type(env, t, NULL);
+       for (i = 0; i < nr_enums; i++) {
+               if (!btf_name_offset_valid(btf, enums[i].name_off)) {
+                       btf_verifier_log(env, "\tInvalid name_offset:%u",
+                                        enums[i].name_off);
+                       return -EINVAL;
+               }
+               /* enum member must have a valid name */
+               if (!enums[i].name_off ||
+                   !btf_name_valid_identifier(btf, enums[i].name_off)) {
+                       btf_verifier_log_type(env, t, "Invalid name");
+                       return -EINVAL;
+               }
+               if (env->log.level == BPF_LOG_KERNEL)
+                       continue;
+               fmt_str = btf_type_kflag(t) ? "\t%s val=%lld\n" : "\t%s val=%llu\n";
+               btf_verifier_log(env, fmt_str,
+                                __btf_name_by_offset(btf, enums[i].name_off),
+                                btf_enum64_value(enums + i));
+       }
+       return meta_needed;
+ }
+ 
+ static void btf_enum64_show(const struct btf *btf, const struct btf_type *t,
+                           u32 type_id, void *data, u8 bits_offset,
+                           struct btf_show *show)
+ {
+       const struct btf_enum64 *enums = btf_type_enum64(t);
+       u32 i, nr_enums = btf_type_vlen(t);
+       void *safe_data;
+       s64 v;
+       safe_data = btf_show_start_type(show, t, type_id, data);
+       if (!safe_data)
+               return;
+       v = *(u64 *)safe_data;
+       for (i = 0; i < nr_enums; i++) {
+               if (v != btf_enum64_value(enums + i))
+                       continue;
+               btf_show_type_value(show, "%s",
+                                   __btf_name_by_offset(btf,
+                                                        enums[i].name_off));
+               btf_show_end_type(show);
+               return;
+       }
+       if (btf_type_kflag(t))
+               btf_show_type_value(show, "%lld", v);
+       else
+               btf_show_type_value(show, "%llu", v);
+       btf_show_end_type(show);
+ }
+ 
+ static struct btf_kind_operations enum64_ops = {
+       .check_meta = btf_enum64_check_meta,
+       .resolve = btf_df_resolve,
+       .check_member = btf_enum_check_member,
+       .check_kflag_member = btf_enum_check_kflag_member,
+       .log_details = btf_enum_log,
+       .show = btf_enum64_show,
+ };
+ 
  static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
                                     const struct btf_type *t,
                                     u32 meta_left)
@@@ -4438,6 -4550,7 +4550,7 @@@ static const struct btf_kind_operation
        [BTF_KIND_FLOAT] = &float_ops,
        [BTF_KIND_DECL_TAG] = &decl_tag_ops,
        [BTF_KIND_TYPE_TAG] = &modifier_ops,
+       [BTF_KIND_ENUM64] = &enum64_ops,
  };
  
  static s32 btf_check_meta(struct btf_verifier_env *env,
@@@ -5299,7 -5412,7 +5412,7 @@@ bool btf_ctx_access(int off, int size, 
        /* skip modifiers */
        while (btf_type_is_modifier(t))
                t = btf_type_by_id(btf, t->type);
-       if (btf_type_is_small_int(t) || btf_type_is_enum(t))
+       if (btf_type_is_small_int(t) || btf_is_any_enum(t))
                /* accessing a scalar */
                return true;
        if (!btf_type_is_ptr(t)) {
@@@ -5763,7 -5876,7 +5876,7 @@@ static int __get_type_size(struct btf *
        if (btf_type_is_ptr(t))
                /* kernel size of pointer. Not BPF's size of pointer*/
                return sizeof(void *);
-       if (btf_type_is_int(t) || btf_type_is_enum(t))
+       if (btf_type_is_int(t) || btf_is_any_enum(t))
                return t->size;
        *bad_type = t;
        return -EINVAL;
@@@ -5911,7 -6024,7 +6024,7 @@@ static int btf_check_func_type_match(st
                 * to context only. And only global functions can be replaced.
                 * Hence type check only those types.
                 */
-               if (btf_type_is_int(t1) || btf_type_is_enum(t1))
+               if (btf_type_is_int(t1) || btf_is_any_enum(t1))
                        continue;
                if (!btf_type_is_ptr(t1)) {
                        bpf_log(log,
@@@ -6054,7 -6167,6 +6167,7 @@@ static int btf_check_func_arg_match(str
                                    struct bpf_reg_state *regs,
                                    bool ptr_to_mem_ok)
  {
 +      enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
        struct bpf_verifier_log *log = &env->log;
        u32 i, nargs, ref_id, ref_obj_id = 0;
        bool is_kfunc = btf_is_kernel(btf);
                                return -EINVAL;
                        }
                        /* rest of the arguments can be anything, like normal kfunc */
 -              } else if (btf_get_prog_ctx_type(log, btf, t, env->prog->type, i)) {
 +              } else if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
                        /* If function expects ctx type in BTF check that caller
                         * is passing PTR_TO_CTX.
                         */
@@@ -6409,7 -6521,7 +6522,7 @@@ int btf_prepare_func_args(struct bpf_ve
        t = btf_type_by_id(btf, t->type);
        while (btf_type_is_modifier(t))
                t = btf_type_by_id(btf, t->type);
-       if (!btf_type_is_int(t) && !btf_type_is_enum(t)) {
+       if (!btf_type_is_int(t) && !btf_is_any_enum(t)) {
                bpf_log(log,
                        "Global function %s() doesn't return scalar. Only those are supported.\n",
                        tname);
                t = btf_type_by_id(btf, args[i].type);
                while (btf_type_is_modifier(t))
                        t = btf_type_by_id(btf, t->type);
-               if (btf_type_is_int(t) || btf_type_is_enum(t)) {
+               if (btf_type_is_int(t) || btf_is_any_enum(t)) {
                        reg->type = SCALAR_VALUE;
                        continue;
                }
@@@ -7336,6 -7448,7 +7449,7 @@@ recur
        case BTF_KIND_UNION:
        case BTF_KIND_ENUM:
        case BTF_KIND_FWD:
+       case BTF_KIND_ENUM64:
                return 1;
        case BTF_KIND_INT:
                /* just reject deprecated bitfield-like integers; all other
   * field-based relocations. This function assumes that root types were already
   * checked for name match. Beyond that initial root-level name check, names
   * are completely ignored. Compatibility rules are as follows:
-  *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
+  *   - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs/ENUM64s are considered compatible, but
   *     kind should match for local and target types (i.e., STRUCT is not
   *     compatible with UNION);
-  *   - for ENUMs, the size is ignored;
+  *   - for ENUMs/ENUM64s, the size is ignored;
   *   - for INT, size and signedness are ignored;
   *   - for ARRAY, dimensionality is ignored, element types are checked for
   *     compatibility recursively;
diff --combined kernel/events/core.c
index 80782cddb1dabf6f917b877acf262ead2cd911ae,deee6815bdd3b45fa603e5f3b3d928d40188afc5..48bae58d240e5c0c8c11bdcf541b17164219a537
@@@ -4257,6 -4257,7 +4257,6 @@@ static void perf_event_remove_on_exec(i
  {
        struct perf_event_context *ctx, *clone_ctx = NULL;
        struct perf_event *event, *next;
 -      LIST_HEAD(free_list);
        unsigned long flags;
        bool modified = false;
  
@@@ -10068,26 -10069,30 +10068,30 @@@ static inline bool perf_event_is_tracin
  int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
                            u64 bpf_cookie)
  {
-       bool is_kprobe, is_tracepoint, is_syscall_tp;
+       bool is_kprobe, is_uprobe, is_tracepoint, is_syscall_tp;
  
        if (!perf_event_is_tracing(event))
                return perf_event_set_bpf_handler(event, prog, bpf_cookie);
  
-       is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
+       is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_KPROBE;
+       is_uprobe = event->tp_event->flags & TRACE_EVENT_FL_UPROBE;
        is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
        is_syscall_tp = is_syscall_trace_event(event->tp_event);
-       if (!is_kprobe && !is_tracepoint && !is_syscall_tp)
+       if (!is_kprobe && !is_uprobe && !is_tracepoint && !is_syscall_tp)
                /* bpf programs can only be attached to u/kprobe or tracepoint */
                return -EINVAL;
  
-       if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) ||
+       if (((is_kprobe || is_uprobe) && prog->type != BPF_PROG_TYPE_KPROBE) ||
            (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT) ||
            (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT))
                return -EINVAL;
  
+       if (prog->type == BPF_PROG_TYPE_KPROBE && prog->aux->sleepable && !is_uprobe)
+               /* only uprobe programs are allowed to be sleepable */
+               return -EINVAL;
+ 
        /* Kprobe override only works for kprobes, not uprobes. */
-       if (prog->kprobe_override &&
-           !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE))
+       if (prog->kprobe_override && !is_kprobe)
                return -EINVAL;
  
        if (is_tracepoint || is_syscall_tp) {
diff --combined kernel/trace/bpf_trace.c
index 7a13e6ac6327ced0adedd309a1895a947572f1f7,d1c22594dbf9c046ca11520e8a6b6046a1b33356..4be976cf7d63db65a85797d808ad20a44a1e1030
@@@ -1936,7 -1936,7 +1936,7 @@@ int perf_event_attach_bpf_prog(struct p
        event->prog = prog;
        event->bpf_cookie = bpf_cookie;
        rcu_assign_pointer(event->tp_event->prog_array, new_array);
-       bpf_prog_array_free(old_array);
+       bpf_prog_array_free_sleepable(old_array);
  
  unlock:
        mutex_unlock(&bpf_event_mutex);
@@@ -1962,7 -1962,7 +1962,7 @@@ void perf_event_detach_bpf_prog(struct 
                bpf_prog_array_delete_safe(old_array, event->prog);
        } else {
                rcu_assign_pointer(event->tp_event->prog_array, new_array);
-               bpf_prog_array_free(old_array);
+               bpf_prog_array_free_sleepable(old_array);
        }
  
        bpf_prog_put(event->prog);
@@@ -2263,11 -2263,11 +2263,11 @@@ static int copy_user_syms(struct user_s
        int err = -ENOMEM;
        unsigned int i;
  
 -      syms = kvmalloc(cnt * sizeof(*syms), GFP_KERNEL);
 +      syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL);
        if (!syms)
                goto error;
  
 -      buf = kvmalloc(cnt * KSYM_NAME_LEN, GFP_KERNEL);
 +      buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL);
        if (!buf)
                goto error;
  
@@@ -2464,7 -2464,7 +2464,7 @@@ int bpf_kprobe_multi_link_attach(const 
                return -EINVAL;
  
        size = cnt * sizeof(*addrs);
 -      addrs = kvmalloc(size, GFP_KERNEL);
 +      addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
        if (!addrs)
                return -ENOMEM;
  
  
        ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
        if (ucookies) {
 -              cookies = kvmalloc(size, GFP_KERNEL);
 +              cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
                if (!cookies) {
                        err = -ENOMEM;
                        goto error;
diff --combined net/ipv4/tcp_input.c
index fdc7beb81b684560ed9e66b24464d870fdd6650e,6426f6a2e744654c416ba51bb665b6e447372d6a..80cb112ef14259c334a60f517bf1ff35e7d1f587
@@@ -805,6 -805,7 +805,6 @@@ static void tcp_event_data_recv(struct 
                         * restart window, so that we send ACKs quickly.
                         */
                        tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
 -                      sk_mem_reclaim(sk);
                }
        }
        icsk->icsk_ack.lrcvtime = now;
@@@ -3966,7 -3967,7 +3966,7 @@@ static bool smc_parse_options(const str
  /* Try to parse the MSS option from the TCP header. Return 0 on failure, clamped
   * value on success.
   */
- static u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss)
+ u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss)
  {
        const unsigned char *ptr = (const unsigned char *)(th + 1);
        int length = (th->doff * 4) - sizeof(struct tcphdr);
        }
        return mss;
  }
+ EXPORT_SYMBOL_GPL(tcp_parse_mss_option);
  
  /* Look for tcp options. Normally only called on SYN and SYNACK packets.
   * But, this can also be called on packets in the established flow when
@@@ -4389,6 -4391,7 +4390,6 @@@ void tcp_fin(struct sock *sk
        skb_rbtree_purge(&tp->out_of_order_queue);
        if (tcp_is_sack(tp))
                tcp_sack_reset(&tp->rx_opt);
 -      sk_mem_reclaim(sk);
  
        if (!sock_flag(sk, SOCK_DEAD)) {
                sk->sk_state_change(sk);
@@@ -5285,7 -5288,7 +5286,7 @@@ new_range
                    before(TCP_SKB_CB(skb)->end_seq, start)) {
                        /* Do not attempt collapsing tiny skbs */
                        if (range_truesize != head->truesize ||
 -                          end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
 +                          end - start >= SKB_WITH_OVERHEAD(PAGE_SIZE)) {
                                tcp_collapse(sk, NULL, &tp->out_of_order_queue,
                                             head, skb, start, end);
                        } else {
@@@ -5334,6 -5337,7 +5335,6 @@@ static bool tcp_prune_ofo_queue(struct 
                tcp_drop_reason(sk, rb_to_skb(node),
                                SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE);
                if (!prev || goal <= 0) {
 -                      sk_mem_reclaim(sk);
                        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
                            !tcp_under_memory_pressure(sk))
                                break;
@@@ -5380,6 -5384,7 +5381,6 @@@ static int tcp_prune_queue(struct sock 
                             skb_peek(&sk->sk_receive_queue),
                             NULL,
                             tp->copied_seq, tp->rcv_nxt);
 -      sk_mem_reclaim(sk);
  
        if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
                return 0;