// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/btf_ids.h>
#include <linux/rcupdate_wait.h>
#include <linux/poll.h>
struct bpf_struct_ops_value {
	struct bpf_struct_ops_common_value common;
	char data[] ____cacheline_aligned_in_smp;
};
#define MAX_TRAMP_IMAGE_PAGES 8

struct bpf_struct_ops_map {
	struct bpf_map map;
	const struct bpf_struct_ops_desc *st_ops_desc;
	/* protect map_update */
	struct mutex lock;
	/* links holds all the bpf_links that are populated
	 * to the func ptrs of the kernel's struct
	 * (in kvalue.data).
	 */
	struct bpf_link **links;
	/* ksyms for bpf trampolines */
	struct bpf_ksym **ksyms;
	u32 funcs_cnt;
	u32 image_pages_cnt;
	/* image_pages is an array of pages that holds all the
	 * trampolines that store the func args before calling the bpf_prog.
	 */
	void *image_pages[MAX_TRAMP_IMAGE_PAGES];
	/* The owner module's btf. */
	struct btf *btf;
	/* uvalue->data stores the kernel struct
	 * (e.g. tcp_congestion_ops) in a form that is more useful
	 * to userspace than the kvalue.  For example,
	 * the bpf_prog's id is stored instead of the kernel
	 * address of a func ptr.
	 */
	struct bpf_struct_ops_value *uvalue;
	/* kvalue.data stores the actual kernel's struct
	 * (e.g. tcp_congestion_ops) that will be
	 * registered to the kernel subsystem.
	 */
	struct bpf_struct_ops_value kvalue;
};
struct bpf_struct_ops_link {
	struct bpf_link link;
	struct bpf_map __rcu *map;
	wait_queue_head_t wait_hup;
};

static DEFINE_MUTEX(update_mutex);
#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)
const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_struct_ops_test_run,
#endif
};
BTF_ID_LIST(st_ops_ids)
BTF_ID(struct, module)
BTF_ID(struct, bpf_struct_ops_common_value)

enum {
	IDX_MODULE_ID,
	IDX_ST_OPS_COMMON_VALUE_ID,
};

extern struct btf *btf_vmlinux;
static bool is_valid_value_type(struct btf *btf, s32 value_id,
				const struct btf_type *type,
				const char *value_name)
{
	const struct btf_type *common_value_type;
	const struct btf_member *member;
	const struct btf_type *vt, *mt;

	vt = btf_type_by_id(btf, value_id);
	if (btf_vlen(vt) != 2) {
		pr_warn("The number of %s's members should be 2, but we get %d\n",
			value_name, btf_vlen(vt));
		return false;
	}
	member = btf_type_member(vt);
	mt = btf_type_by_id(btf, member->type);
	common_value_type = btf_type_by_id(btf_vmlinux,
					   st_ops_ids[IDX_ST_OPS_COMMON_VALUE_ID]);
	if (mt != common_value_type) {
		pr_warn("The first member of %s should be bpf_struct_ops_common_value\n",
			value_name);
		return false;
	}

	member++;
	mt = btf_type_by_id(btf, member->type);
	if (mt != type) {
		pr_warn("The second member of %s should be %s\n",
			value_name, btf_name_by_offset(btf, type->name_off));
		return false;
	}

	return true;
}
static void *bpf_struct_ops_image_alloc(void)
{
	void *image;
	int err;

	err = bpf_jit_charge_modmem(PAGE_SIZE);
	if (err)
		return ERR_PTR(err);
	image = arch_alloc_bpf_trampoline(PAGE_SIZE);
	if (!image) {
		bpf_jit_uncharge_modmem(PAGE_SIZE);
		return ERR_PTR(-ENOMEM);
	}

	return image;
}

void bpf_struct_ops_image_free(void *image)
{
	if (image) {
		arch_free_bpf_trampoline(image, PAGE_SIZE);
		bpf_jit_uncharge_modmem(PAGE_SIZE);
	}
}
#define MAYBE_NULL_SUFFIX "__nullable"
#define MAX_STUB_NAME 128

/* Return the type info of a stub function, if it exists.
 *
 * The name of a stub function is made up of the name of the struct_ops and
 * the name of the function pointer member, separated by "__". For example,
 * if the struct_ops type is named "foo_ops" and the function pointer
 * member is named "bar", the stub function name would be "foo_ops__bar".
 */
static const struct btf_type *
find_stub_func_proto(const struct btf *btf, const char *st_op_name,
		     const char *member_name)
{
	char stub_func_name[MAX_STUB_NAME];
	const struct btf_type *func_type;
	s32 btf_id;
	int cp;

	cp = snprintf(stub_func_name, MAX_STUB_NAME, "%s__%s",
		      st_op_name, member_name);
	if (cp >= MAX_STUB_NAME) {
		pr_warn("Stub function name too long\n");
		return NULL;
	}
	btf_id = btf_find_by_name_kind(btf, stub_func_name, BTF_KIND_FUNC);
	if (btf_id < 0)
		return NULL;
	func_type = btf_type_by_id(btf, btf_id);
	if (!func_type)
		return NULL;

	return btf_type_by_id(btf, func_type->type); /* FUNC_PROTO */
}
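
/* Illustrative sketch (hypothetical subsystem, not from this file): a
 * struct_ops named "foo_ops" with a member "int (*bar)(struct foo *f);"
 * would declare a stub like
 *
 *	static int foo_ops__bar(struct foo *f__nullable) { return 0; }
 *
 * The stub's body is never run; only its BTF matters.  Argument name
 * suffixes such as "__nullable" (handled below) annotate how the
 * verifier should treat the corresponding argument of the attached
 * BPF program.
 */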
/* Prepare argument info for every nullable argument of a member of a
 * struct_ops type.
 *
 * Initialize a struct bpf_struct_ops_arg_info according to the type info
 * of the arguments of a stub function. (Check kCFI for more information
 * about stub functions.)
 *
 * Each member in the struct_ops type has a struct bpf_struct_ops_arg_info
 * to provide an array of struct bpf_ctx_arg_aux, which in turn provides
 * the information that is used by the verifier to check the arguments of
 * the BPF struct_ops program assigned to the member. Here, we only care
 * about the arguments that are marked as __nullable.
 *
 * The array of struct bpf_ctx_arg_aux is eventually assigned to
 * prog->aux->ctx_arg_info of BPF struct_ops programs and passed to the
 * verifier. (See check_struct_ops_btf_id())
 *
 * arg_info->info will be the list of struct bpf_ctx_arg_aux on success.
 * On failure, arg_info is left untouched.
 */
static int prepare_arg_info(struct btf *btf,
			    const char *st_ops_name,
			    const char *member_name,
			    const struct btf_type *func_proto,
			    struct bpf_struct_ops_arg_info *arg_info)
{
	const struct btf_type *stub_func_proto, *pointed_type;
	const struct btf_param *stub_args, *args;
	struct bpf_ctx_arg_aux *info, *info_buf;
	u32 nargs, arg_no, info_cnt = 0;
	u32 arg_btf_id;
	int offset;

	stub_func_proto = find_stub_func_proto(btf, st_ops_name, member_name);
	if (!stub_func_proto)
		return 0;

	/* Check if the number of arguments of the stub function is the same
	 * as the number of arguments of the function pointer.
	 */
	nargs = btf_type_vlen(func_proto);
	if (nargs != btf_type_vlen(stub_func_proto)) {
		pr_warn("the number of arguments of the stub function %s__%s does not match the number of arguments of the member %s of struct %s\n",
			st_ops_name, member_name, member_name, st_ops_name);
		return -EINVAL;
	}

	if (!nargs)
		return 0;

	args = btf_params(func_proto);
	stub_args = btf_params(stub_func_proto);

	info_buf = kcalloc(nargs, sizeof(*info_buf), GFP_KERNEL);
	if (!info_buf)
		return -ENOMEM;

	/* Prepare info for every nullable argument */
	info = info_buf;
	for (arg_no = 0; arg_no < nargs; arg_no++) {
		/* Skip arguments that are not suffixed with
		 * "__nullable".
		 */
		if (!btf_param_match_suffix(btf, &stub_args[arg_no],
					    MAYBE_NULL_SUFFIX))
			continue;

		/* Should be a pointer to struct */
		pointed_type = btf_type_resolve_ptr(btf,
						    args[arg_no].type,
						    &arg_btf_id);
		if (!pointed_type ||
		    !btf_type_is_struct(pointed_type)) {
			pr_warn("stub function %s__%s has %s tagging to an unsupported type\n",
				st_ops_name, member_name, MAYBE_NULL_SUFFIX);
			goto err_out;
		}

		offset = btf_ctx_arg_offset(btf, func_proto, arg_no);
		if (offset < 0) {
			pr_warn("stub function %s__%s has an invalid trampoline ctx offset for arg#%u\n",
				st_ops_name, member_name, arg_no);
			goto err_out;
		}

		if (args[arg_no].type != stub_args[arg_no].type) {
			pr_warn("arg#%u type in stub function %s__%s does not match with its original func_proto\n",
				arg_no, st_ops_name, member_name);
			goto err_out;
		}

		/* Fill the information of the new argument */
		info->reg_type =
			PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL;
		info->btf_id = arg_btf_id;
		info->btf = btf;
		info->offset = offset;

		info++;
		info_cnt++;
	}

	if (info_cnt) {
		arg_info->info = info_buf;
		arg_info->cnt = info_cnt;
	} else {
		kfree(info_buf);
	}

	return 0;

err_out:
	kfree(info_buf);

	return -EINVAL;
}
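
/* For the hypothetical foo_ops__bar(struct foo *f__nullable) mentioned
 * above, the verifier sees arg "f" as PTR_TO_BTF_ID | PTR_TRUSTED |
 * PTR_MAYBE_NULL, so a BPF program attached to .bar must NULL-check it
 * before dereferencing (sketch, using libbpf's SEC()/BPF_PROG macros):
 *
 *	SEC("struct_ops/bar")
 *	int BPF_PROG(bar, struct foo *f)
 *	{
 *		if (!f)
 *			return 0;
 *		return f->val;
 *	}
 */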
/* Clean up the arg_info in a struct bpf_struct_ops_desc. */
void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
{
	struct bpf_struct_ops_arg_info *arg_info;
	int i;

	arg_info = st_ops_desc->arg_info;
	for (i = 0; i < btf_type_vlen(st_ops_desc->type); i++)
		kfree(arg_info[i].info);

	kfree(arg_info);
}
int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
			     struct btf *btf,
			     struct bpf_verifier_log *log)
{
	struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
	struct bpf_struct_ops_arg_info *arg_info;
	const struct btf_member *member;
	const struct btf_type *t;
	s32 type_id, value_id;
	char value_name[128];
	const char *mname;
	int i, err;

	if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
	    sizeof(value_name)) {
		pr_warn("struct_ops name %s is too long\n",
			st_ops->name);
		return -EINVAL;
	}
	sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

	if (!st_ops->cfi_stubs) {
		pr_warn("struct_ops for %s has no cfi_stubs\n", st_ops->name);
		return -EINVAL;
	}

	type_id = btf_find_by_name_kind(btf, st_ops->name,
					BTF_KIND_STRUCT);
	if (type_id < 0) {
		pr_warn("Cannot find struct %s in %s\n",
			st_ops->name, btf_get_name(btf));
		return -EINVAL;
	}
	t = btf_type_by_id(btf, type_id);
	if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
		pr_warn("Cannot support #%u members in struct %s\n",
			btf_type_vlen(t), st_ops->name);
		return -EINVAL;
	}

	value_id = btf_find_by_name_kind(btf, value_name,
					 BTF_KIND_STRUCT);
	if (value_id < 0) {
		pr_warn("Cannot find struct %s in %s\n",
			value_name, btf_get_name(btf));
		return -EINVAL;
	}
	if (!is_valid_value_type(btf, value_id, t, value_name))
		return -EINVAL;

	arg_info = kcalloc(btf_type_vlen(t), sizeof(*arg_info),
			   GFP_KERNEL);
	if (!arg_info)
		return -ENOMEM;

	st_ops_desc->arg_info = arg_info;
	st_ops_desc->type = t;
	st_ops_desc->type_id = type_id;
	st_ops_desc->value_id = value_id;
	st_ops_desc->value_type = btf_type_by_id(btf, value_id);

	for_each_member(i, t, member) {
		const struct btf_type *func_proto;

		mname = btf_name_by_offset(btf, member->name_off);
		if (!*mname) {
			pr_warn("anon member in struct %s is not supported\n",
				st_ops->name);
			err = -EOPNOTSUPP;
			goto errout;
		}

		if (__btf_member_bitfield_size(t, member)) {
			pr_warn("bit field member %s in struct %s is not supported\n",
				mname, st_ops->name);
			err = -EOPNOTSUPP;
			goto errout;
		}

		func_proto = btf_type_resolve_func_ptr(btf,
						       member->type,
						       NULL);
		if (!func_proto)
			continue;

		if (btf_distill_func_proto(log, btf,
					   func_proto, t,
					   &st_ops->func_models[i])) {
			pr_warn("Error in parsing func ptr %s in struct %s\n",
				mname, st_ops->name);
			err = -EINVAL;
			goto errout;
		}

		err = prepare_arg_info(btf, st_ops->name, mname,
				       func_proto,
				       arg_info + i);
		if (err)
			goto errout;
	}

	if (st_ops->init(btf)) {
		pr_warn("Error in init bpf_struct_ops %s\n",
			st_ops->name);
		err = -EINVAL;
		goto errout;
	}

	return 0;

errout:
	bpf_struct_ops_desc_release(st_ops_desc);

	return err;
}
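
/* A rough sketch of the subsystem side feeding this function (names
 * hypothetical): a module typically defines the ops and registers them,
 * which ends up here to build the bpf_struct_ops_desc from BTF:
 *
 *	static struct bpf_struct_ops bpf_foo_ops = {
 *		.verifier_ops	= &foo_verifier_ops,
 *		.init		= foo_init,
 *		.init_member	= foo_init_member,
 *		.reg		= foo_reg,
 *		.unreg		= foo_unreg,
 *		.cfi_stubs	= &__bpf_foo_ops,
 *		.name		= "foo_ops",
 *		.owner		= THIS_MODULE,
 *	};
 *	err = register_bpf_struct_ops(&bpf_foo_ops, foo_ops);
 */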
static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
					   void *next_key)
{
	if (key && *(u32 *)key == 0)
		return -ENOENT;

	*(u32 *)next_key = 0;
	return 0;
}
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	enum bpf_struct_ops_state state;
	s64 refcnt;

	if (unlikely(*(u32 *)key != 0))
		return -ENOENT;

	kvalue = &st_map->kvalue;
	/* Pair with smp_store_release() during map_update */
	state = smp_load_acquire(&kvalue->common.state);
	if (state == BPF_STRUCT_OPS_STATE_INIT) {
		memset(value, 0, map->value_size);
		return 0;
	}

	/* No lock is needed.  state and refcnt do not need
	 * to be updated together under atomic context.
	 */
	uvalue = value;
	memcpy(uvalue, st_map->uvalue, map->value_size);
	uvalue->common.state = state;

	/* This value offers the user space a general estimate of how
	 * many sockets are still utilizing this struct_ops for TCP
	 * congestion control. The number might not be exact, but it
	 * should sufficiently meet our present goals.
	 */
	refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
	refcount_set(&uvalue->common.refcnt, max_t(s64, refcnt, 0));

	return 0;
}
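
/* Userspace observes this through an ordinary map lookup; a sketch,
 * error handling omitted (bpftool's "struct_ops dump" uses the same
 * path):
 *
 *	__u32 key = 0;
 *	struct bpf_struct_ops_tcp_congestion_ops val;
 *
 *	bpf_map_lookup_elem(map_fd, &key, &val);
 *	// val.common.state and val.common.refcnt are filled in above
 */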
static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}

static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->links[i])
			break;
		bpf_link_put(st_map->links[i]);
		st_map->links[i] = NULL;
	}
}

static void bpf_struct_ops_map_free_image(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->image_pages_cnt; i++)
		bpf_struct_ops_image_free(st_map->image_pages[i]);
	st_map->image_pages_cnt = 0;
}
static int check_zero_holes(const struct btf *btf, const struct btf_type *t, void *data)
{
	const struct btf_member *member;
	u32 i, moff, msize, prev_mend = 0;
	const struct btf_type *mtype;

	for_each_member(i, t, member) {
		moff = __btf_member_bit_offset(t, member) / 8;
		if (moff > prev_mend &&
		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
			return -EINVAL;

		mtype = btf_type_by_id(btf, member->type);
		mtype = btf_resolve_size(btf, mtype, &msize);
		if (IS_ERR(mtype))
			return PTR_ERR(mtype);
		prev_mend = moff + msize;
	}

	if (t->size > prev_mend &&
	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
		return -EINVAL;

	return 0;
}
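
/* Example of what this catches: on most ABIs,
 *
 *	struct ops_val { u8 a; u32 b; };
 *
 * has a 3-byte padding hole after 'a'.  If userspace leaves garbage in
 * that hole, the memchr_inv() between members (and after the last
 * member) rejects the update with -EINVAL.
 */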
static void bpf_struct_ops_link_release(struct bpf_link *link)
{
}

static void bpf_struct_ops_link_dealloc(struct bpf_link *link)
{
	struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link);

	kfree(tlink);
}

const struct bpf_link_ops bpf_struct_ops_link_lops = {
	.release = bpf_struct_ops_link_release,
	.dealloc = bpf_struct_ops_link_dealloc,
};
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *stub_func,
				      void **_image, u32 *_image_off,
				      bool allow_alloc)
{
	u32 image_off = *_image_off, flags = BPF_TRAMP_F_INDIRECT;
	void *image = *_image;
	int size;

	tlinks[BPF_TRAMP_FENTRY].links[0] = link;
	tlinks[BPF_TRAMP_FENTRY].nr_links = 1;

	if (model->ret_size > 0)
		flags |= BPF_TRAMP_F_RET_FENTRY_RET;

	size = arch_bpf_trampoline_size(model, flags, tlinks, NULL);
	if (size <= 0)
		return size ? : -EFAULT;

	/* Allocate image buffer if necessary */
	if (!image || size > PAGE_SIZE - image_off) {
		if (!allow_alloc)
			return -E2BIG;

		image = bpf_struct_ops_image_alloc();
		if (IS_ERR(image))
			return PTR_ERR(image);
		image_off = 0;
	}

	size = arch_prepare_bpf_trampoline(NULL, image + image_off,
					   image + image_off + size,
					   model, flags, tlinks, stub_func);
	if (size <= 0) {
		if (image != *_image)
			bpf_struct_ops_image_free(image);
		return size ? : -EFAULT;
	}

	*_image = image;
	*_image_off = image_off + size;
	return size;
}
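
/* Trampolines are packed back to back into PAGE_SIZE images, e.g.
 *
 *	image_pages[0]: [tramp for .op_a][tramp for .op_b][..unused..]
 *
 * A new page is allocated only when the next trampoline does not fit
 * in the remaining space of the current page (see above), bounded by
 * MAX_TRAMP_IMAGE_PAGES per map.
 */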
static void bpf_struct_ops_ksym_init(const char *tname, const char *mname,
				     void *image, unsigned int size,
				     struct bpf_ksym *ksym)
{
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf__%s_%s", tname, mname);
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	bpf_image_ksym_init(image, size, ksym);
}
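
/* Given the format string above, a tname of "tcp_congestion_ops" and an
 * mname of "ssthresh" would, for example, show up as
 * "bpf__tcp_congestion_ops_ssthresh" in /proc/kallsyms once added,
 * making the trampoline attributable in stack traces and profilers.
 */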
static void bpf_struct_ops_map_add_ksyms(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->ksyms[i])
			break;
		bpf_image_ksym_add(st_map->ksyms[i]);
	}
}

static void bpf_struct_ops_map_del_ksyms(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->ksyms[i])
			break;
		bpf_image_ksym_del(st_map->ksyms[i]);
	}
}

static void bpf_struct_ops_map_free_ksyms(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->ksyms[i])
			break;
		kfree(st_map->ksyms[i]);
		st_map->ksyms[i] = NULL;
	}
}
static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 flags)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
	const struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	const struct btf_type *module_type;
	const struct btf_member *member;
	const struct btf_type *t = st_ops_desc->type;
	struct bpf_tramp_links *tlinks;
	void *udata, *kdata;
	int prog_fd, err;
	u32 i, trampoline_start, image_off = 0;
	void *cur_image = NULL, *image = NULL;
	struct bpf_link **plink;
	struct bpf_ksym **pksym;
	const char *tname, *mname;

	if (flags)
		return -EINVAL;

	if (*(u32 *)key != 0)
		return -E2BIG;

	err = check_zero_holes(st_map->btf, st_ops_desc->value_type, value);
	if (err)
		return err;

	uvalue = value;
	err = check_zero_holes(st_map->btf, t, uvalue->data);
	if (err)
		return err;

	if (uvalue->common.state || refcount_read(&uvalue->common.refcnt))
		return -EINVAL;

	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks)
		return -ENOMEM;

	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

	mutex_lock(&st_map->lock);

	if (kvalue->common.state != BPF_STRUCT_OPS_STATE_INIT) {
		err = -EBUSY;
		goto unlock;
	}

	memcpy(uvalue, value, map->value_size);

	udata = &uvalue->data;
	kdata = &kvalue->data;

	plink = st_map->links;
	pksym = st_map->ksyms;
	tname = btf_name_by_offset(st_map->btf, t->name_off);
	module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]);
	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		struct bpf_tramp_link *link;
		struct bpf_ksym *ksym;
		u32 moff, msize;

		moff = __btf_member_bit_offset(t, member) / 8;
		mname = btf_name_by_offset(st_map->btf, member->name_off);
		ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL);
		if (ptype == module_type) {
			if (*(void **)(udata + moff))
				goto reset_unlock;
			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
			continue;
		}

		err = st_ops->init_member(t, member, kdata, udata);
		if (err < 0)
			goto reset_unlock;

		/* The ->init_member() has handled this member */
		if (err > 0)
			continue;

		/* If st_ops->init_member does not handle it,
		 * we will only handle func ptrs and zero-ed members
		 * here. Reject everything else.
		 */

		/* All non func ptr members must be 0 */
		if (!ptype || !btf_type_is_func_proto(ptype)) {
			mtype = btf_type_by_id(st_map->btf, member->type);
			mtype = btf_resolve_size(st_map->btf, mtype, &msize);
			if (IS_ERR(mtype)) {
				err = PTR_ERR(mtype);
				goto reset_unlock;
			}

			if (memchr_inv(udata + moff, 0, msize)) {
				err = -EINVAL;
				goto reset_unlock;
			}

			continue;
		}

		prog_fd = (int)(*(unsigned long *)(udata + moff));
		/* Similar check as the attr->attach_prog_fd */
		if (!prog_fd)
			continue;

		prog = bpf_prog_get(prog_fd);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto reset_unlock;
		}

		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
		    prog->aux->attach_btf_id != st_ops_desc->type_id ||
		    prog->expected_attach_type != i) {
			bpf_prog_put(prog);
			err = -EINVAL;
			goto reset_unlock;
		}

		link = kzalloc(sizeof(*link), GFP_USER);
		if (!link) {
			bpf_prog_put(prog);
			err = -ENOMEM;
			goto reset_unlock;
		}
		bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
			      &bpf_struct_ops_link_lops, prog);
		*plink++ = &link->link;

		ksym = kzalloc(sizeof(*ksym), GFP_USER);
		if (!ksym) {
			err = -ENOMEM;
			goto reset_unlock;
		}
		*pksym++ = ksym;

		trampoline_start = image_off;
		err = bpf_struct_ops_prepare_trampoline(tlinks, link,
						&st_ops->func_models[i],
						*(void **)(st_ops->cfi_stubs + moff),
						&image, &image_off,
						st_map->image_pages_cnt < MAX_TRAMP_IMAGE_PAGES);
		if (err)
			goto reset_unlock;

		if (cur_image != image) {
			st_map->image_pages[st_map->image_pages_cnt++] = image;
			cur_image = image;
			trampoline_start = 0;
		}

		*(void **)(kdata + moff) = image + trampoline_start + cfi_get_offset();

		/* put prog_id to udata */
		*(unsigned long *)(udata + moff) = prog->aux->id;

		/* init ksym for this trampoline */
		bpf_struct_ops_ksym_init(tname, mname,
					 image + trampoline_start,
					 image_off - trampoline_start,
					 ksym);
	}

	if (st_ops->validate) {
		err = st_ops->validate(kdata);
		if (err)
			goto reset_unlock;
	}
	for (i = 0; i < st_map->image_pages_cnt; i++) {
		err = arch_protect_bpf_trampoline(st_map->image_pages[i],
						  PAGE_SIZE);
		if (err)
			goto reset_unlock;
	}

	if (st_map->map.map_flags & BPF_F_LINK) {
		err = 0;
		/* Let bpf_link handle registration & unregistration.
		 *
		 * Pair with smp_load_acquire() during lookup_elem().
		 */
		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_READY);
		goto unlock;
	}

	err = st_ops->reg(kdata, NULL);
	if (likely(!err)) {
		/* This refcnt increment on the map here after
		 * 'st_ops->reg()' is secure since the state of the
		 * map must be set to INIT at this moment, and thus
		 * bpf_struct_ops_map_delete_elem() can't unregister
		 * or transition it to TOBEFREE concurrently.
		 */
		bpf_map_inc(map);
		/* Pair with smp_load_acquire() during lookup_elem().
		 * It ensures the above udata updates (e.g. prog->aux->id)
		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
		 */
		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_INUSE);
		goto unlock;
	}

	/* Error during st_ops->reg(). Can happen if this struct_ops needs to be
	 * verified as a whole, after all init_member() calls. Can also happen if
	 * there was a race in registering the struct_ops (under the same name) to
	 * a sub-system through different struct_ops's maps.
	 */

reset_unlock:
	bpf_struct_ops_map_free_ksyms(st_map);
	bpf_struct_ops_map_free_image(st_map);
	bpf_struct_ops_map_put_progs(st_map);
	memset(uvalue, 0, map->value_size);
	memset(kvalue, 0, map->value_size);
unlock:
	kfree(tlinks);
	mutex_unlock(&st_map->lock);
	if (!err)
		bpf_struct_ops_map_add_ksyms(st_map);
	return err;
}
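
/* The usual userspace flow driving this function (a sketch; libbpf's
 * SEC(".struct_ops") handling does the same under the hood):
 *
 *	1. Load each struct_ops BPF prog (BPF_PROG_TYPE_STRUCT_OPS).
 *	2. Fill the map's value type with prog fds at the func ptr
 *	   members, zeros elsewhere.
 *	3. bpf_map_update_elem(map_fd, &(__u32){0}, &value, 0);
 *
 * On success the kernel struct is registered, or marked READY for a
 * later BPF_LINK_CREATE when the map was created with BPF_F_LINK.
 */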
static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
	enum bpf_struct_ops_state prev_state;
	struct bpf_struct_ops_map *st_map;

	st_map = (struct bpf_struct_ops_map *)map;
	if (st_map->map.map_flags & BPF_F_LINK)
		return -EOPNOTSUPP;

	prev_state = cmpxchg(&st_map->kvalue.common.state,
			     BPF_STRUCT_OPS_STATE_INUSE,
			     BPF_STRUCT_OPS_STATE_TOBEFREE);
	switch (prev_state) {
	case BPF_STRUCT_OPS_STATE_INUSE:
		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, NULL);
		bpf_map_put(map);
		return 0;
	case BPF_STRUCT_OPS_STATE_TOBEFREE:
		return -EINPROGRESS;
	case BPF_STRUCT_OPS_STATE_INIT:
		return -ENOENT;
	default:
		WARN_ON_ONCE(1);
		/* Should never happen.  Treat it as not found. */
		return -ENOENT;
	}
}
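
/* Lifecycle of kvalue.common.state for a non-BPF_F_LINK map:
 *
 *	INIT --update_elem()--> INUSE --delete_elem()--> TOBEFREE
 *
 * TOBEFREE only marks the map unreachable for new users; the memory is
 * freed once the map's refcount drops to zero.  BPF_F_LINK maps instead
 * go INIT -> READY and are (un)registered through bpf_link.
 */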
static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
					     struct seq_file *m)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	void *value;
	int err;

	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		return;

	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	if (!err) {
		btf_type_seq_show(st_map->btf,
				  map->btf_vmlinux_value_type_id,
				  value, m);
		seq_putc(m, '\n');
	}

	kfree(value);
}
static void __bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	if (st_map->links)
		bpf_struct_ops_map_put_progs(st_map);
	if (st_map->ksyms)
		bpf_struct_ops_map_free_ksyms(st_map);
	bpf_map_area_free(st_map->links);
	bpf_map_area_free(st_map->ksyms);
	bpf_struct_ops_map_free_image(st_map);
	bpf_map_area_free(st_map->uvalue);
	bpf_map_area_free(st_map);
}
static void bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	/* st_ops->owner was acquired during map_alloc to implicitly hold
	 * the btf's refcnt. The acquire was only done when btf_is_module()
	 * returned true, so st_map->btf cannot be NULL here.
	 */
	if (btf_is_module(st_map->btf))
		module_put(st_map->st_ops_desc->st_ops->owner);

	bpf_struct_ops_map_del_ksyms(st_map);

	/* The struct_ops's function may switch to another struct_ops.
	 *
	 * For example, bpf_tcp_cc_x->init() may switch to
	 * another tcp_cc_y by calling
	 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
	 * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
	 * and its refcount may reach 0 which then frees its
	 * trampoline image while tcp_cc_x is still running.
	 *
	 * A vanilla rcu gp is to wait for all bpf-tcp-cc progs
	 * to finish. bpf-tcp-cc progs are non sleepable.
	 * A rcu_tasks gp is to wait for the last few insns
	 * in the trampoline image to finish before releasing
	 * the trampoline image.
	 */
	synchronize_rcu_mult(call_rcu, call_rcu_tasks);

	__bpf_struct_ops_map_free(map);
}
static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
	    (attr->map_flags & ~(BPF_F_LINK | BPF_F_VTYPE_BTF_OBJ_FD)) ||
	    !attr->btf_vmlinux_value_type_id)
		return -EINVAL;
	return 0;
}
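
/* A BPF_MAP_CREATE attr accepted by the check above looks like (sketch):
 *
 *	map_type                  = BPF_MAP_TYPE_STRUCT_OPS,
 *	key_size                  = 4,	// single u32 key, always 0
 *	value_size                = <size of the value type>,
 *	max_entries               = 1,
 *	btf_vmlinux_value_type_id = <BTF id of bpf_struct_ops_<name>>,
 *	map_flags                 = 0, BPF_F_LINK and/or
 *	                            BPF_F_VTYPE_BTF_OBJ_FD
 */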
static u32 count_func_ptrs(const struct btf *btf, const struct btf_type *t)
{
	int i;
	u32 count = 0;
	const struct btf_member *member;

	for_each_member(i, t, member)
		if (btf_type_resolve_func_ptr(btf, member->type, NULL))
			count++;

	return count;
}
static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
	const struct bpf_struct_ops_desc *st_ops_desc;
	size_t st_map_size;
	struct bpf_struct_ops_map *st_map;
	const struct btf_type *t, *vt;
	struct module *mod = NULL;
	struct bpf_map *map;
	struct btf *btf;
	int ret;

	if (attr->map_flags & BPF_F_VTYPE_BTF_OBJ_FD) {
		/* The map holds btf for its whole life time. */
		btf = btf_get_by_fd(attr->value_type_btf_obj_fd);
		if (IS_ERR(btf))
			return ERR_CAST(btf);
		if (!btf_is_module(btf)) {
			btf_put(btf);
			return ERR_PTR(-EINVAL);
		}

		mod = btf_try_get_module(btf);
		/* mod holds a refcnt to btf. We don't need an extra refcnt
		 * here.
		 */
		btf_put(btf);
		if (!mod)
			return ERR_PTR(-EINVAL);
	} else {
		btf = bpf_get_btf_vmlinux();
		if (IS_ERR(btf))
			return ERR_CAST(btf);
		if (!btf)
			return ERR_PTR(-ENOTSUPP);
	}

	st_ops_desc = bpf_struct_ops_find_value(btf, attr->btf_vmlinux_value_type_id);
	if (!st_ops_desc) {
		ret = -ENOTSUPP;
		goto errout;
	}

	vt = st_ops_desc->value_type;
	if (attr->value_size != vt->size) {
		ret = -EINVAL;
		goto errout;
	}

	t = st_ops_desc->type;

	st_map_size = sizeof(*st_map) +
		/* kvalue stores the
		 * struct bpf_struct_ops_tcp_congestion_ops, for example
		 */
		(vt->size - sizeof(struct bpf_struct_ops_value));

	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
	if (!st_map) {
		ret = -ENOMEM;
		goto errout;
	}

	st_map->st_ops_desc = st_ops_desc;
	map = &st_map->map;

	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
	st_map->funcs_cnt = count_func_ptrs(btf, t);
	st_map->links =
		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_link *),
				   NUMA_NO_NODE);
	st_map->ksyms =
		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_ksym *),
				   NUMA_NO_NODE);
	if (!st_map->uvalue || !st_map->links || !st_map->ksyms) {
		ret = -ENOMEM;
		goto errout_free;
	}
	st_map->btf = btf;

	mutex_init(&st_map->lock);
	bpf_map_init_from_attr(map, attr);

	return map;

errout_free:
	__bpf_struct_ops_map_free(map);
errout:
	module_put(mod);

	return ERR_PTR(ret);
}
static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
	const struct btf_type *vt = st_ops_desc->value_type;
	u64 usage;

	usage = sizeof(*st_map) +
			vt->size - sizeof(struct bpf_struct_ops_value);
	usage += vt->size;
	usage += st_map->funcs_cnt * sizeof(struct bpf_link *);
	usage += st_map->funcs_cnt * sizeof(struct bpf_ksym *);
	usage += PAGE_SIZE;
	return usage;
}
BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
const struct bpf_map_ops bpf_struct_ops_map_ops = {
	.map_alloc_check = bpf_struct_ops_map_alloc_check,
	.map_alloc = bpf_struct_ops_map_alloc,
	.map_free = bpf_struct_ops_map_free,
	.map_get_next_key = bpf_struct_ops_map_get_next_key,
	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
	.map_delete_elem = bpf_struct_ops_map_delete_elem,
	.map_update_elem = bpf_struct_ops_map_update_elem,
	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
	.map_mem_usage = bpf_struct_ops_map_mem_usage,
	.map_btf_id = &bpf_struct_ops_map_btf_ids[0],
};
1107 /* "const void *" because some subsystem is
1108 * passing a const (e.g. const struct tcp_congestion_ops *)
1110 bool bpf_struct_ops_get(const void *kdata)
1112 struct bpf_struct_ops_value *kvalue;
1113 struct bpf_struct_ops_map *st_map;
1114 struct bpf_map *map;
1116 kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
1117 st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
1119 map = __bpf_map_inc_not_zero(&st_map->map, false);
1120 return !IS_ERR(map);
1123 void bpf_struct_ops_put(const void *kdata)
1125 struct bpf_struct_ops_value *kvalue;
1126 struct bpf_struct_ops_map *st_map;
1128 kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
1129 st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
1131 bpf_map_put(&st_map->map);
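
/* Sketch of how a subsystem pins the map while the kernel struct is in
 * use (this mirrors what TCP does around congestion ops; names
 * simplified):
 *
 *	if (!bpf_struct_ops_get(kdata))	// map already going away?
 *		return -EBUSY;
 *	...use the ops...
 *	bpf_struct_ops_put(kdata);	// may free the map + trampolines
 */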
int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
{
	void *func_ptr = *(void **)(st_ops->cfi_stubs + moff);

	return func_ptr ? 0 : -ENOTSUPP;
}
static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
		map->map_flags & BPF_F_LINK &&
		/* Pair with smp_store_release() during map_update */
		smp_load_acquire(&st_map->kvalue.common.state) == BPF_STRUCT_OPS_STATE_READY;
}
static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_struct_ops_map *st_map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = (struct bpf_struct_ops_map *)
		rcu_dereference_protected(st_link->map, true);
	if (st_map) {
		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
		bpf_map_put(&st_map->map);
	}
	kfree(st_link);
}
static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
						struct seq_file *seq)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	if (map)
		seq_printf(seq, "map_id:\t%d\n", map->id);
	rcu_read_unlock();
}
static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
						  struct bpf_link_info *info)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	if (map)
		info->struct_ops.map_id = map->id;
	rcu_read_unlock();
	return 0;
}
static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map *new_map,
					  struct bpf_map *expected_old_map)
{
	struct bpf_struct_ops_map *st_map, *old_st_map;
	struct bpf_map *old_map;
	struct bpf_struct_ops_link *st_link;
	int err;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = container_of(new_map, struct bpf_struct_ops_map, map);

	if (!bpf_struct_ops_valid_to_reg(new_map))
		return -EINVAL;

	if (!st_map->st_ops_desc->st_ops->update)
		return -EOPNOTSUPP;

	mutex_lock(&update_mutex);

	old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
	if (!old_map) {
		err = -ENOLINK;
		goto err_out;
	}
	if (expected_old_map && old_map != expected_old_map) {
		err = -EPERM;
		goto err_out;
	}

	old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
	/* The new and old struct_ops must be the same type. */
	if (st_map->st_ops_desc != old_st_map->st_ops_desc) {
		err = -EINVAL;
		goto err_out;
	}

	err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data, link);
	if (err)
		goto err_out;

	bpf_map_inc(new_map);
	rcu_assign_pointer(st_link->map, new_map);
	bpf_map_put(old_map);

err_out:
	mutex_unlock(&update_mutex);

	return err;
}
static int bpf_struct_ops_map_link_detach(struct bpf_link *link)
{
	struct bpf_struct_ops_link *st_link = container_of(link, struct bpf_struct_ops_link, link);
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;

	mutex_lock(&update_mutex);

	map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
	if (!map) {
		mutex_unlock(&update_mutex);
		return 0;
	}
	st_map = container_of(map, struct bpf_struct_ops_map, map);

	st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);

	RCU_INIT_POINTER(st_link->map, NULL);
	/* Pair with bpf_map_get() in bpf_struct_ops_link_create() or
	 * bpf_map_inc() in bpf_struct_ops_map_link_update().
	 */
	bpf_map_put(&st_map->map);

	mutex_unlock(&update_mutex);

	wake_up_interruptible_poll(&st_link->wait_hup, EPOLLHUP);

	return 0;
}
static __poll_t bpf_struct_ops_map_link_poll(struct file *file,
					     struct poll_table_struct *pts)
{
	struct bpf_struct_ops_link *st_link = file->private_data;

	poll_wait(file, &st_link->wait_hup, pts);

	return rcu_access_pointer(st_link->map) ? 0 : EPOLLHUP;
}
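
/* Userspace can wait for the kernel side to detach the link (sketch):
 *
 *	struct pollfd pfd = { .fd = link_fd, .events = POLLHUP };
 *
 *	poll(&pfd, 1, -1);	// returns once the map is detached
 */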
static const struct bpf_link_ops bpf_struct_ops_map_lops = {
	.dealloc = bpf_struct_ops_map_link_dealloc,
	.detach = bpf_struct_ops_map_link_detach,
	.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
	.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
	.update_map = bpf_struct_ops_map_link_update,
	.poll = bpf_struct_ops_map_link_poll,
};
int bpf_struct_ops_link_create(union bpf_attr *attr)
{
	struct bpf_struct_ops_link *link = NULL;
	struct bpf_link_primer link_primer;
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;
	int err;

	map = bpf_map_get(attr->link_create.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	st_map = (struct bpf_struct_ops_map *)map;

	if (!bpf_struct_ops_valid_to_reg(map)) {
		err = -EINVAL;
		goto err_out;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto err_out;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto err_out;

	init_waitqueue_head(&link->wait_hup);

	/* Hold the update_mutex such that the subsystem cannot
	 * do link->ops->detach() before the link is fully initialized.
	 */
	mutex_lock(&update_mutex);
	err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data, &link->link);
	if (err) {
		mutex_unlock(&update_mutex);
		bpf_link_cleanup(&link_primer);
		link = NULL;
		goto err_out;
	}
	RCU_INIT_POINTER(link->map, map);
	mutex_unlock(&update_mutex);

	return bpf_link_settle(&link_primer);

err_out:
	bpf_map_put(map);
	kfree(link);
	return err;
}
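
/* Typical usage from userspace (sketch): create the map with BPF_F_LINK,
 * update it with the prog fds, then
 *
 *	link_fd = bpf_link_create(map_fd, 0, BPF_STRUCT_OPS, NULL);
 *
 * libbpf wraps this as bpf_map__attach_struct_ops(map).
 */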
void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	info->btf_vmlinux_id = btf_obj_id(st_map->btf);
}