Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
author Jakub Kicinski <[email protected]>
Fri, 29 Mar 2024 00:24:10 +0000 (17:24 -0700)
committer Jakub Kicinski <[email protected]>
Fri, 29 Mar 2024 00:25:57 +0000 (17:25 -0700)
Cross-merge networking fixes after downstream PR.

No conflicts, or adjacent changes.

Signed-off-by: Jakub Kicinski <[email protected]>
arch/arm64/net/bpf_jit_comp.c
arch/s390/net/bpf_jit_comp.c
include/linux/skbuff.h
kernel/bpf/helpers.c
kernel/bpf/verifier.c
tools/bpf/bpftool/gen.c
tools/lib/bpf/libbpf.c

diff --combined arch/arm64/net/bpf_jit_comp.c
index bc16eb6946578bebe2933aa5fe1ee7f9cb77faf0,122021f9bdfc87c3c9634d6801edad7845b9f96e..82d6c562e8c7ee49cd662eab497daf72cc1939b1
@@@ -943,7 -943,7 +943,7 @@@ static int build_insn(const struct bpf_
                        emit(A64_UXTH(is64, dst, dst), ctx);
                        break;
                case 32:
-                       emit(A64_REV32(is64, dst, dst), ctx);
+                       emit(A64_REV32(0, dst, dst), ctx);
                        /* upper 32 bits already cleared */
                        break;
                case 64:
@@@ -1256,7 -1256,7 +1256,7 @@@ emit_cond_jmp
                        } else {
                                emit_a64_mov_i(1, tmp, off, ctx);
                                if (sign_extend)
-                                       emit(A64_LDRSW(dst, src_adj, off_adj), ctx);
+                                       emit(A64_LDRSW(dst, src, tmp), ctx);
                                else
                                        emit(A64_LDR32(dst, src, tmp), ctx);
                        }
@@@ -2176,9 -2176,12 +2176,9 @@@ void arch_free_bpf_trampoline(void *ima
        bpf_prog_pack_free(image, size);
  }
  
 -void arch_protect_bpf_trampoline(void *image, unsigned int size)
 -{
 -}
 -
 -void arch_unprotect_bpf_trampoline(void *image, unsigned int size)
 +int arch_protect_bpf_trampoline(void *image, unsigned int size)
  {
 +      return 0;
  }
  
  int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
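
The arm64 hunks above carry three fixes. BPF_END with imm == 32 must emit the 32-bit (sf == 0) form of REV32, since the 64-bit form byte-swaps each 32-bit half in place and keeps the upper half. The sign-extending load in the large-offset path must use the tmp register that actually holds the offset (src_adj/off_adj apply only to the short-immediate path). And arch_protect_bpf_trampoline() now returns int, so implementations that change memory permissions can report failure. A minimal user-space sketch of the bswap32 semantics the JIT has to match, assuming nothing beyond standard C:

#include <stdint.h>
#include <stdio.h>

/* Reference semantics of a BPF_END byte swap with imm == 32 on a 64-bit
 * register: swap the low 32 bits and clear the upper 32 bits, matching the
 * "upper 32 bits already cleared" comment in the hunk above.
 */
static uint64_t bswap32_semantics(uint64_t dst)
{
	uint32_t lo = (uint32_t)dst;

	lo = ((lo & 0x000000ffu) << 24) | ((lo & 0x0000ff00u) << 8) |
	     ((lo & 0x00ff0000u) >> 8)  | ((lo & 0xff000000u) >> 24);
	return lo;	/* zero extension clears the upper half */
}

int main(void)
{
	/* prints 0000000088776655 */
	printf("%016llx\n",
	       (unsigned long long)bswap32_semantics(0x1122334455667788ull));
	return 0;
}
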
diff --combined arch/s390/net/bpf_jit_comp.c
index e613eebfd3492fde3019780dcca19d72b3e0b492,5af0402e94b88c07ec3772699926d19f8f18b043..fa2f824e3b062d2b307c75d098a5573018486ae8
@@@ -516,11 -516,12 +516,12 @@@ static void bpf_skip(struct bpf_jit *ji
   * PLT for hotpatchable calls. The calling convention is the same as for the
   * ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered.
   */
- extern const char bpf_plt[];
- extern const char bpf_plt_ret[];
- extern const char bpf_plt_target[];
- extern const char bpf_plt_end[];
- #define BPF_PLT_SIZE 32
+ struct bpf_plt {
+       char code[16];
+       void *ret;
+       void *target;
+ } __packed;
+ extern const struct bpf_plt bpf_plt;
  asm(
        ".pushsection .rodata\n"
        "       .balign 8\n"
        "       .balign 8\n"
        "bpf_plt_ret: .quad 0\n"
        "bpf_plt_target: .quad 0\n"
-       "bpf_plt_end:\n"
        "       .popsection\n"
  );
  
- static void bpf_jit_plt(void *plt, void *ret, void *target)
+ static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
  {
-       memcpy(plt, bpf_plt, BPF_PLT_SIZE);
-       *(void **)((char *)plt + (bpf_plt_ret - bpf_plt)) = ret;
-       *(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target ?: ret;
+       memcpy(plt, &bpf_plt, sizeof(*plt));
+       plt->ret = ret;
+       plt->target = target;
  }
  
  /*
@@@ -662,9 -662,9 +662,9 @@@ static void bpf_jit_epilogue(struct bpf
        jit->prg = ALIGN(jit->prg, 8);
        jit->prologue_plt = jit->prg;
        if (jit->prg_buf)
-               bpf_jit_plt(jit->prg_buf + jit->prg,
+               bpf_jit_plt((struct bpf_plt *)(jit->prg_buf + jit->prg),
                            jit->prg_buf + jit->prologue_plt_ret, NULL);
-       jit->prg += BPF_PLT_SIZE;
+       jit->prg += sizeof(struct bpf_plt);
  }
  
  static int get_probe_mem_regno(const u8 *insn)
@@@ -2040,9 -2040,6 +2040,6 @@@ struct bpf_prog *bpf_int_jit_compile(st
        struct bpf_jit jit;
        int pass;
  
-       if (WARN_ON_ONCE(bpf_plt_end - bpf_plt != BPF_PLT_SIZE))
-               return orig_fp;
        if (!fp->jit_requested)
                return orig_fp;
  
@@@ -2111,11 -2108,7 +2108,11 @@@ skip_init_ctx
                print_fn_code(jit.prg_buf, jit.size_prg);
        }
        if (!fp->is_func || extra_pass) {
 -              bpf_jit_binary_lock_ro(header);
 +              if (bpf_jit_binary_lock_ro(header)) {
 +                      bpf_jit_binary_free(header);
 +                      fp = orig_fp;
 +                      goto free_addrs;
 +              }
        } else {
                jit_data->header = header;
                jit_data->ctx = jit;
@@@ -2152,14 -2145,11 +2149,11 @@@ bool bpf_jit_supports_far_kfunc_call(vo
  int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
                       void *old_addr, void *new_addr)
  {
+       struct bpf_plt expected_plt, current_plt, new_plt, *plt;
        struct {
                u16 opc;
                s32 disp;
        } __packed insn;
-       char expected_plt[BPF_PLT_SIZE];
-       char current_plt[BPF_PLT_SIZE];
-       char new_plt[BPF_PLT_SIZE];
-       char *plt;
        char *ret;
        int err;
  
                 */
        } else {
                /* Verify the PLT. */
-               plt = (char *)ip + (insn.disp << 1);
-               err = copy_from_kernel_nofault(current_plt, plt, BPF_PLT_SIZE);
+               plt = ip + (insn.disp << 1);
+               err = copy_from_kernel_nofault(&current_plt, plt,
+                                              sizeof(current_plt));
                if (err < 0)
                        return err;
                ret = (char *)ip + 6;
-               bpf_jit_plt(expected_plt, ret, old_addr);
-               if (memcmp(current_plt, expected_plt, BPF_PLT_SIZE))
+               bpf_jit_plt(&expected_plt, ret, old_addr);
+               if (memcmp(&current_plt, &expected_plt, sizeof(current_plt)))
                        return -EINVAL;
                /* Adjust the call address. */
-               bpf_jit_plt(new_plt, ret, new_addr);
-               s390_kernel_write(plt + (bpf_plt_target - bpf_plt),
-                                 new_plt + (bpf_plt_target - bpf_plt),
+               bpf_jit_plt(&new_plt, ret, new_addr);
+               s390_kernel_write(&plt->target, &new_plt.target,
                                  sizeof(void *));
        }
  
diff --combined include/linux/skbuff.h
index b945af8a620881ad07ed465cc1c46a2cf6d95333,9d24aec064e888316b5a5cf450cf25ffdd391183..517e546a120aec086b45657d73761a5c90f5f475
@@@ -753,8 -753,6 +753,6 @@@ typedef unsigned char *sk_buff_data_t
   *    @list: queue head
   *    @ll_node: anchor in an llist (eg socket defer_list)
   *    @sk: Socket we are owned by
-  *    @ip_defrag_offset: (aka @sk) alternate use of @sk, used in
-  *            fragmentation management
   *    @dev: Device we arrived on/are leaving by
   *    @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
   *    @cb: Control buffer. Free for use by every layer. Put private vars here
@@@ -875,10 -873,7 +873,7 @@@ struct sk_buff 
                struct llist_node       ll_node;
        };
  
-       union {
-               struct sock             *sk;
-               int                     ip_defrag_offset;
-       };
+       struct sock             *sk;
  
        union {
                ktime_t         tstamp;
@@@ -4063,6 -4058,12 +4058,6 @@@ int skb_copy_datagram_from_iter(struct 
                                 struct iov_iter *from, int len);
  int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
  void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
 -void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
 -static inline void skb_free_datagram_locked(struct sock *sk,
 -                                          struct sk_buff *skb)
 -{
 -      __skb_free_datagram_locked(sk, skb, 0);
 -}
  int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
  int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
  int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
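
The skbuff.h hunks drop the union that let @ip_defrag_offset alias @sk, along with the now-unused skb_free_datagram_locked() wrapper. A small stand-alone illustration (made-up names) of why aliasing a pointer with an int in a union is fragile:

#include <stdio.h>

struct buf {
	union {
		void *sk;		/* owner pointer */
		int defrag_offset;	/* alternate use while queued */
	};
};

int main(void)
{
	struct buf b = { .sk = NULL };

	b.defrag_offset = 1400;	/* frag code stores an offset... */
	if (b.sk != NULL)	/* ...and the pointer now looks set */
		printf("sk looks valid but holds no pointer\n");
	return 0;
}
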
diff --combined kernel/bpf/helpers.c
index 9234174ccb21b9e5f682e4c7f572ea2c6033bf2f,449b9a5d3fe3f3fd0a88e945e0039f850d5225b2..f860adab3eb960106a3184c59d3a3a045e6c22ae
@@@ -1730,10 -1730,6 +1730,10 @@@ bpf_base_func_proto(enum bpf_func_id fu
                return &bpf_strtol_proto;
        case BPF_FUNC_strtoul:
                return &bpf_strtoul_proto;
 +      case BPF_FUNC_get_current_pid_tgid:
 +              return &bpf_get_current_pid_tgid_proto;
 +      case BPF_FUNC_get_ns_current_pid_tgid:
 +              return &bpf_get_ns_current_pid_tgid_proto;
        default:
                break;
        }
@@@ -2552,7 -2548,7 +2552,7 @@@ __bpf_kfunc void bpf_throw(u64 cookie
  __bpf_kfunc_end_defs();
  
  BTF_KFUNCS_START(generic_btf_ids)
- #ifdef CONFIG_KEXEC_CORE
+ #ifdef CONFIG_CRASH_DUMP
  BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
  #endif
  BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
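
Two changes above: the crash_kexec() kfunc is now guarded by CONFIG_CRASH_DUMP instead of CONFIG_KEXEC_CORE, and bpf_get_current_pid_tgid()/bpf_get_ns_current_pid_tgid() join the base helper set, so program types limited to base helpers can call them. A hedged BPF-side sketch (section name and body are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/egress")
int log_tgid(struct __sk_buff *skb)
{
	/* upper 32 bits of the return value hold the tgid */
	__u64 pid_tgid = bpf_get_current_pid_tgid();

	bpf_printk("egress from tgid %u", (__u32)(pid_tgid >> 32));
	return 1;	/* allow the packet */
}

char _license[] SEC("license") = "GPL";
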
diff --combined kernel/bpf/verifier.c
index ca6cacf7b42f216fedbaccda99ac2fb04dac5cb0,353985b2b6a279f0d5a1784d753b10002cfb216e..17f26ba1a9e011606572f4308fbe523a6f85670d
@@@ -5682,6 -5682,13 +5682,13 @@@ static bool is_flow_key_reg(struct bpf_
        return reg->type == PTR_TO_FLOW_KEYS;
  }
  
+ static bool is_arena_reg(struct bpf_verifier_env *env, int regno)
+ {
+       const struct bpf_reg_state *reg = reg_state(env, regno);
+ 
+       return reg->type == PTR_TO_ARENA;
+ }
+ 
  static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
  #ifdef CONFIG_NET
        [PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
@@@ -6694,6 -6701,11 +6701,11 @@@ static int check_stack_access_within_bo
        err = check_stack_slot_within_bounds(env, min_off, state, type);
        if (!err && max_off > 0)
                err = -EINVAL; /* out of stack access into non-negative offsets */
+       if (!err && access_size < 0)
+               /* access_size should not be negative (or overflow an int); other checks
+                * along the way should have prevented such an access.
+                */
+               err = -EFAULT; /* invalid negative access size; integer overflow? */
  
        if (err) {
                if (tnum_is_const(reg->var_off)) {
@@@ -7019,7 -7031,8 +7031,8 @@@ static int check_atomic(struct bpf_veri
        if (is_ctx_reg(env, insn->dst_reg) ||
            is_pkt_reg(env, insn->dst_reg) ||
            is_flow_key_reg(env, insn->dst_reg) ||
-           is_sk_reg(env, insn->dst_reg)) {
+           is_sk_reg(env, insn->dst_reg) ||
+           is_arena_reg(env, insn->dst_reg)) {
                verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
                        insn->dst_reg,
                        reg_type_str(env, reg_state(env, insn->dst_reg)->type));
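
The new is_arena_reg() check above rejects BPF_ATOMIC instructions whose destination register holds a PTR_TO_ARENA pointer, mirroring the existing ctx/pkt/flow-key/socket cases. A hedged sketch of the kind of program this refuses; the __arena macro follows the selftests' convention and is an assumption here:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define __arena __attribute__((address_space(1)))	/* selftests-style */

int __arena *counter;	/* pointer into the arena */

SEC("syscall")
int bump(void *ctx)
{
	/* With the hunk above, the verifier rejects this atomic RMW on an
	 * arena pointer instead of letting it reach the JIT.
	 */
	__sync_fetch_and_add(counter, 1);
	return 0;
}

char _license[] SEC("license") = "GPL";
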
@@@ -14014,6 -14027,10 +14027,10 @@@ static int check_alu_op(struct bpf_veri
                                        verbose(env, "addr_space_cast insn can only convert between address space 1 and 0\n");
                                        return -EINVAL;
                                }
+                               if (!env->prog->aux->arena) {
+                                       verbose(env, "addr_space_cast insn can only be used in a program that has an associated arena\n");
+                                       return -EINVAL;
+                               }
                        } else {
                                if ((insn->off != 0 && insn->off != 8 && insn->off != 16 &&
                                     insn->off != 32) || insn->imm) {
                                if (insn->imm) {
                                        /* off == BPF_ADDR_SPACE_CAST */
                                        mark_reg_unknown(env, regs, insn->dst_reg);
-                                       if (insn->imm == 1) /* cast from as(1) to as(0) */
+                                       if (insn->imm == 1) { /* cast from as(1) to as(0) */
                                                dst_reg->type = PTR_TO_ARENA;
+                                               /* PTR_TO_ARENA is 32-bit */
+                                               dst_reg->subreg_def = env->insn_idx + 1;
+                                       }
                                } else if (insn->off == 0) {
                                        /* case: R1 = R2
                                         * copy register state to dest reg
@@@ -14544,19 -14564,7 +14564,19 @@@ static void regs_refine_cond_op(struct 
        struct tnum t;
        u64 val;
  
 -again:
 +      /* In case of GE/GT/SGE/SGT, reuse LE/LT/SLE/SLT logic from below */
 +      switch (opcode) {
 +      case BPF_JGE:
 +      case BPF_JGT:
 +      case BPF_JSGE:
 +      case BPF_JSGT:
 +              opcode = flip_opcode(opcode);
 +              swap(reg1, reg2);
 +              break;
 +      default:
 +              break;
 +      }
 +
        switch (opcode) {
        case BPF_JEQ:
                if (is_jmp32) {
                        reg2->smin_value = max(reg1->smin_value + 1, reg2->smin_value);
                }
                break;
 -      case BPF_JGE:
 -      case BPF_JGT:
 -      case BPF_JSGE:
 -      case BPF_JSGT:
 -              /* just reuse LE/LT logic above */
 -              opcode = flip_opcode(opcode);
 -              swap(reg1, reg2);
 -              goto again;
        default:
                return;
        }
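
The rewrite above replaces the goto-based retry in regs_refine_cond_op() with up-front normalization: GE/GT/SGE/SGT opcodes are flipped and the operand registers swapped, so only the LE/LT/SLE/SLT direction needs refinement logic. A self-contained user-space sketch of that normalization idea:

#include <stdbool.h>
#include <stdio.h>

enum op { OP_GE, OP_GT, OP_LE, OP_LT };

static enum op flip(enum op o)
{
	switch (o) {
	case OP_GE: return OP_LE;
	case OP_GT: return OP_LT;
	case OP_LE: return OP_GE;
	case OP_LT: return OP_GT;
	}
	return o;
}

/* "a OP b" with OP in {>=, >} becomes "b OP' a", done once at the top
 * instead of looping back with goto.
 */
static bool eval(long *a, long *b, enum op o)
{
	if (o == OP_GE || o == OP_GT) {
		long *tmp = a; a = b; b = tmp;
		o = flip(o);
	}
	return o == OP_LE ? *a <= *b : *a < *b;
}

int main(void)
{
	long x = 3, y = 5;

	printf("%d %d\n", eval(&x, &y, OP_GT), eval(&y, &x, OP_GT)); /* 0 1 */
	return 0;
}
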
@@@ -19162,7 -19178,6 +19182,7 @@@ static int jit_subprogs(struct bpf_veri
                if (bpf_prog_calc_tag(func[i]))
                        goto out_free;
                func[i]->is_func = 1;
 +              func[i]->sleepable = prog->sleepable;
                func[i]->aux->func_idx = i;
                /* Below members will be freed only at prog->aux */
                func[i]->aux->btf = prog->aux->btf;
         * bpf_prog_load will add the kallsyms for the main program.
         */
        for (i = 1; i < env->subprog_cnt; i++) {
 -              bpf_prog_lock_ro(func[i]);
 -              bpf_prog_kallsyms_add(func[i]);
 +              err = bpf_prog_lock_ro(func[i]);
 +              if (err)
 +                      goto out_free;
        }
  
 +      for (i = 1; i < env->subprog_cnt; i++)
 +              bpf_prog_kallsyms_add(func[i]);
 +
        /* Last step: make now unused interpreter insns from main
         * prog consistent for later dump requests, so they can
         * later look the same as if they were interpreted only.
@@@ -19610,8 -19621,9 +19630,9 @@@ static int do_misc_fixups(struct bpf_ve
                            (((struct bpf_map *)env->prog->aux->arena)->map_flags & BPF_F_NO_USER_CONV)) {
                                /* convert to 32-bit mov that clears upper 32-bit */
                                insn->code = BPF_ALU | BPF_MOV | BPF_X;
-                               /* clear off, so it's a normal 'wX = wY' from JIT pov */
+                               /* clear off and imm, so it's a normal 'wX = wY' from JIT pov */
                                insn->off = 0;
+                               insn->imm = 0;
                        } /* cast from as(0) to as(1) should be handled by JIT */
                        goto next_insn;
                }
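
Also in the verifier hunks: addr_space_cast is refused when the program has no associated arena map, a cast from as(1) to as(0) marks the destination as a 32-bit subregister via subreg_def, a negative (overflowed) stack access size now fails with -EFAULT, and do_misc_fixups() clears both off and imm when rewriting a cast to a plain 32-bit move. For context, a hedged sketch of the arena map such a program is expected to declare (sizes illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, 10);	/* arena size, in pages */
} arena SEC(".maps");
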
diff --combined tools/bpf/bpftool/gen.c
index 3ce277544c246d5615cb283a41089f3fbfe79e71,540c0f2c4fda07cef798769e4795cfb0fb574d3e..786268f1a483560999021c00ea66dbe7273a15b9
@@@ -121,7 -121,7 +121,7 @@@ static bool get_datasec_ident(const cha
        int i, n;
  
        /* recognize hard coded LLVM section name */
-       if (strcmp(sec_name, ".arena.1") == 0) {
+       if (strcmp(sec_name, ".addr_space.1") == 0) {
                /* this is the name to use in skeleton */
                snprintf(buf, buf_sz, "arena");
                return true;
@@@ -1131,8 -1131,7 +1131,8 @@@ static void gen_st_ops_shadow_init(stru
                        continue;
                codegen("\
                        \n\
 -                              obj->struct_ops.%1$s = bpf_map__initial_value(obj->maps.%1$s, NULL);\n\
 +                              obj->struct_ops.%1$s = (typeof(obj->struct_ops.%1$s))\n\
 +                                      bpf_map__initial_value(obj->maps.%1$s, NULL);\n\
                        \n\
                        ", ident);
        }
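
The bpftool change tracks LLVM's rename of the arena data section: globals placed in address space 1 now land in a datasec called ".addr_space.1" rather than ".arena.1", which skeletons keep exposing under the ident "arena". A hedged source-level sketch:

#define __arena __attribute__((address_space(1)))	/* selftests-style */

/* ends up in the ".addr_space.1" datasec; the generated skeleton exposes
 * it roughly as skel->arena->shared_counter
 */
int __arena shared_counter;
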
diff --combined tools/lib/bpf/libbpf.c
index 8eb2cd4ef2889a0dd6f770808d55116b3f56428f,a2061fcd612d7f67d22daa1f86168104f92d11e2..d7d8f78f88463801f48a3019808c6f72f0962219
@@@ -498,7 -498,7 +498,7 @@@ struct bpf_struct_ops 
  #define KSYMS_SEC ".ksyms"
  #define STRUCT_OPS_SEC ".struct_ops"
  #define STRUCT_OPS_LINK_SEC ".struct_ops.link"
- #define ARENA_SEC ".arena.1"
+ #define ARENA_SEC ".addr_space.1"
  
  enum libbpf_map_type {
        LIBBPF_MAP_UNSPEC,
@@@ -1132,26 -1132,8 +1132,26 @@@ static int bpf_map__init_kern_struct_op
                const char *mname;
  
                mname = btf__name_by_offset(btf, member->name_off);
 +              moff = member->offset / 8;
 +              mdata = data + moff;
 +              msize = btf__resolve_size(btf, member->type);
 +              if (msize < 0) {
 +                      pr_warn("struct_ops init_kern %s: failed to resolve the size of member %s\n",
 +                              map->name, mname);
 +                      return msize;
 +              }
 +
                kern_member = find_member_by_name(kern_btf, kern_type, mname);
                if (!kern_member) {
 +                      /* Skip all zeros or null fields if they are not
 +                       * presented in the kernel BTF.
 +                       */
 +                      if (libbpf_is_mem_zeroed(mdata, msize)) {
 +                              pr_info("struct_ops %s: member %s not found in kernel, skipping it as it's set to zero\n",
 +                                      map->name, mname);
 +                              continue;
 +                      }
 +
                        pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
                                map->name, mname);
                        return -ENOTSUP;
                        return -ENOTSUP;
                }
  
 -              moff = member->offset / 8;
                kern_moff = kern_member->offset / 8;
 -
 -              mdata = data + moff;
                kern_mdata = kern_data + kern_moff;
  
                mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
                        continue;
                }
  
 -              msize = btf__resolve_size(btf, mtype_id);
                kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
 -              if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
 +              if (kern_msize < 0 || msize != kern_msize) {
                        pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
                                map->name, mname, (ssize_t)msize,
                                (ssize_t)kern_msize);
@@@ -1664,6 -1650,10 +1664,10 @@@ static int sys_memfd_create(const char 
        return syscall(__NR_memfd_create, name, flags);
  }
  
+ #ifndef MFD_CLOEXEC
+ #define MFD_CLOEXEC 0x0001U
+ #endif
+ 
  static int create_placeholder_fd(void)
  {
        int fd;
@@@ -5366,8 -5356,8 +5370,8 @@@ retry
                                        goto err_out;
                        }
                        if (map->def.type == BPF_MAP_TYPE_ARENA) {
-                               map->mmaped = mmap((void *)map->map_extra, bpf_map_mmap_sz(map),
-                                                  PROT_READ | PROT_WRITE,
+                               map->mmaped = mmap((void *)(long)map->map_extra,
+                                                  bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
                                                   map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
                                                   map->fd, 0);
                                if (map->mmaped == MAP_FAILED) {
@@@ -7331,9 -7321,9 +7335,9 @@@ static int bpf_object_load_prog(struct 
        char *cp, errmsg[STRERR_BUFSIZE];
        size_t log_buf_size = 0;
        char *log_buf = NULL, *tmp;
 -      int btf_fd, ret, err;
        bool own_log_buf = true;
        __u32 log_level = prog->log_level;
 +      int ret, err;
  
        if (prog->type == BPF_PROG_TYPE_UNSPEC) {
                /*
        load_attr.prog_ifindex = prog->prog_ifindex;
  
        /* specify func_info/line_info only if kernel supports them */
 -      btf_fd = btf__fd(obj->btf);
 -      if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
 -              load_attr.prog_btf_fd = btf_fd;
 +      if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
 +              load_attr.prog_btf_fd = btf__fd(obj->btf);
                load_attr.func_info = prog->func_info;
                load_attr.func_info_rec_size = prog->func_info_rec_size;
                load_attr.func_info_cnt = prog->func_info_cnt;
@@@ -8572,11 -8563,6 +8576,11 @@@ int bpf_map__pin(struct bpf_map *map, c
                return libbpf_err(-EINVAL);
        }
  
 +      if (map->fd < 0) {
 +              pr_warn("map '%s': can't pin BPF map without FD (was it created?)\n", map->name);
 +              return libbpf_err(-EINVAL);
 +      }
 +
        if (map->pin_path) {
                if (path && strcmp(path, map->pin_path)) {
                        pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
@@@ -9312,7 -9298,6 +9316,7 @@@ static const struct bpf_sec_def section
        SEC_DEF("sockops",              SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT),
        SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT),
        SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT),
 +      SEC_DEF("sk_skb/verdict",       SK_SKB, BPF_SK_SKB_VERDICT, SEC_ATTACHABLE_OPT),
        SEC_DEF("sk_skb",               SK_SKB, 0, SEC_NONE),
        SEC_DEF("sk_msg",               SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT),
        SEC_DEF("lirc_mode2",           LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT),
@@@ -10322,11 -10307,6 +10326,11 @@@ static int validate_map_op(const struc
                return -EINVAL;
        }
  
 +      if (map->fd < 0) {
 +              pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name);
 +              return -EINVAL;
 +      }
 +
        if (!check_value_sz)
                return 0;
  
@@@ -10439,15 -10419,8 +10443,15 @@@ long libbpf_get_error(const void *ptr
  int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
  {
        int ret;
 +      int prog_fd = bpf_program__fd(prog);
  
 -      ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
 +      if (prog_fd < 0) {
 +              pr_warn("prog '%s': can't use BPF program without FD (was it loaded?)\n",
 +                      prog->name);
 +              return libbpf_err(-EINVAL);
 +      }
 +
 +      ret = bpf_link_update(bpf_link__fd(link), prog_fd, NULL);
        return libbpf_err_errno(ret);
  }
  
@@@ -10641,7 -10614,7 +10645,7 @@@ struct bpf_link *bpf_program__attach_pe
        }
        prog_fd = bpf_program__fd(prog);
        if (prog_fd < 0) {
 -              pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
 +              pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
                        prog->name);
                return libbpf_err_ptr(-EINVAL);
        }
@@@ -11365,13 -11338,6 +11369,13 @@@ bpf_program__attach_kprobe_multi_opts(c
        if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
                return libbpf_err_ptr(-EINVAL);
  
 +      prog_fd = bpf_program__fd(prog);
 +      if (prog_fd < 0) {
 +              pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
 +                      prog->name);
 +              return libbpf_err_ptr(-EINVAL);
 +      }
 +
        syms    = OPTS_GET(opts, syms, false);
        addrs   = OPTS_GET(opts, addrs, false);
        cnt     = OPTS_GET(opts, cnt, false);
        }
        link->detach = &bpf_link__detach_fd;
  
 -      prog_fd = bpf_program__fd(prog);
        link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &lopts);
        if (link_fd < 0) {
                err = -errno;
@@@ -11794,13 -11761,6 +11798,13 @@@ bpf_program__attach_uprobe_multi(const 
        if (!OPTS_VALID(opts, bpf_uprobe_multi_opts))
                return libbpf_err_ptr(-EINVAL);
  
 +      prog_fd = bpf_program__fd(prog);
 +      if (prog_fd < 0) {
 +              pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
 +                      prog->name);
 +              return libbpf_err_ptr(-EINVAL);
 +      }
 +
        syms = OPTS_GET(opts, syms, NULL);
        offsets = OPTS_GET(opts, offsets, NULL);
        ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL);
        }
        link->detach = &bpf_link__detach_fd;
  
 -      prog_fd = bpf_program__fd(prog);
        link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts);
        if (link_fd < 0) {
                err = -errno;
@@@ -12119,7 -12080,7 +12123,7 @@@ struct bpf_link *bpf_program__attach_us
                return libbpf_err_ptr(-EINVAL);
  
        if (bpf_program__fd(prog) < 0) {
 -              pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
 +              pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
                        prog->name);
                return libbpf_err_ptr(-EINVAL);
        }
@@@ -12310,19 -12271,13 +12314,19 @@@ static int attach_tp(const struct bpf_p
        return libbpf_get_error(*link);
  }
  
 -struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
 -                                                  const char *tp_name)
 +struct bpf_link *
 +bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog,
 +                                      const char *tp_name,
 +                                      struct bpf_raw_tracepoint_opts *opts)
  {
 +      LIBBPF_OPTS(bpf_raw_tp_opts, raw_opts);
        char errmsg[STRERR_BUFSIZE];
        struct bpf_link *link;
        int prog_fd, pfd;
  
 +      if (!OPTS_VALID(opts, bpf_raw_tracepoint_opts))
 +              return libbpf_err_ptr(-EINVAL);
 +
        prog_fd = bpf_program__fd(prog);
        if (prog_fd < 0) {
                pr_warn("prog '%s': can't attach before loaded\n", prog->name);
                return libbpf_err_ptr(-ENOMEM);
        link->detach = &bpf_link__detach_fd;
  
 -      pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
 +      raw_opts.tp_name = tp_name;
 +      raw_opts.cookie = OPTS_GET(opts, cookie, 0);
 +      pfd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_opts);
        if (pfd < 0) {
                pfd = -errno;
                free(link);
        return link;
  }
  
 +struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
 +                                                  const char *tp_name)
 +{
 +      return bpf_program__attach_raw_tracepoint_opts(prog, tp_name, NULL);
 +}
 +
  static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
  {
        static const char *const prefixes[] = {
@@@ -12715,12 -12662,6 +12719,12 @@@ struct bpf_link *bpf_program__attach(co
        if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
                return libbpf_err_ptr(-EOPNOTSUPP);
  
 +      if (bpf_program__fd(prog) < 0) {
 +              pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
 +                      prog->name);
 +              return libbpf_err_ptr(-EINVAL);
 +      }
 +
        err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
        if (err)
                return libbpf_err_ptr(err);
@@@ -12761,13 -12702,8 +12765,13 @@@ struct bpf_link *bpf_map__attach_struct
        __u32 zero = 0;
        int err, fd;
  
 -      if (!bpf_map__is_struct_ops(map) || map->fd == -1)
 +      if (!bpf_map__is_struct_ops(map))
 +              return libbpf_err_ptr(-EINVAL);
 +
 +      if (map->fd < 0) {
 +              pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name);
                return libbpf_err_ptr(-EINVAL);
 +      }
  
        link = calloc(1, sizeof(*link));
        if (!link)
@@@ -12815,14 -12751,9 +12819,14 @@@ int bpf_link__update_map(struct bpf_lin
        __u32 zero = 0;
        int err;
  
 -      if (!bpf_map__is_struct_ops(map) || !map_is_created(map))
 +      if (!bpf_map__is_struct_ops(map))
                return -EINVAL;
  
 +      if (map->fd < 0) {
 +              pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name);
 +              return -EINVAL;
 +      }
 +
        st_ops_link = container_of(link, struct bpf_link_struct_ops, link);
        /* Ensure the type of a link is correct */
        if (st_ops_link->map_fd < 0)
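
A recurring theme in the libbpf hunks is early FD validation: attach, pin, and update paths now fail with -EINVAL and a uniform "without FD (was it loaded?)" warning instead of handing -1 file descriptors to the kernel. A hedged user-space sketch of the resulting behavior (object and program names are made up):

#include <bpf/libbpf.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
	struct bpf_program *prog;
	struct bpf_link *link;

	if (!obj)
		return 1;
	prog = bpf_object__find_program_by_name(obj, "handler");
	if (!prog)
		return 1;

	link = bpf_program__attach(prog);	/* object was never loaded */
	if (!link)
		printf("attach failed as expected: %d\n", -errno);

	bpf_object__close(obj);
	return 0;
}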