// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * common eBPF ELF operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License (not later!)
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses>
 */
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <limits.h>
#include <sys/resource.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
/*
 * When building perf, unistd.h is overridden. __NR_bpf is
 * required to be defined explicitly.
 */
#ifndef __NR_bpf
# if defined(__i386__)
#  define __NR_bpf 357
# elif defined(__x86_64__)
#  define __NR_bpf 321
# elif defined(__aarch64__)
#  define __NR_bpf 280
# elif defined(__sparc__)
#  define __NR_bpf 349
# elif defined(__s390__)
#  define __NR_bpf 351
# elif defined(__arc__)
#  define __NR_bpf 280
# elif defined(__mips__) && defined(_ABIO32)
#  define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
#  define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
#  define __NR_bpf 5315
# else
#  error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}
static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}
static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
			     unsigned int size)
{
	int fd;

	fd = sys_bpf(cmd, attr, size);
	return ensure_good_fd(fd);
}
#define PROG_LOAD_ATTEMPTS 5

static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
{
	int fd;

	do {
		fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
	} while (fd < 0 && errno == EAGAIN && --attempts > 0);

	return fd;
}
/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
 * memcg-based memory accounting for BPF maps and progs. This was done in [0].
 * We use the support for bpf_ktime_get_coarse_ns() helper, which was added in
 * the same 5.11 Linux release ([1]), to detect memcg-based accounting for BPF.
 *
 *   [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
 *   [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
 */
int probe_memcg_account(void)
{
	const size_t prog_load_attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
	struct bpf_insn insns[] = {
		BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
		BPF_EXIT_INSN(),
	};
	size_t insn_cnt = ARRAY_SIZE(insns);
	union bpf_attr attr;
	int prog_fd;

	/* attempt loading a program that uses the bpf_ktime_get_coarse_ns()
	 * helper; pre-5.11 kernels, which still do memlock-based accounting,
	 * reject it
	 */
	memset(&attr, 0, prog_load_attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = insn_cnt;
	attr.license = ptr_to_u64("GPL");

	prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, prog_load_attr_sz);
	if (prog_fd >= 0) {
		close(prog_fd);
		return 1;
	}
	return 0;
}
static bool memlock_bumped;
static rlim_t memlock_rlim = RLIM_INFINITY;

int libbpf_set_memlock_rlim(size_t memlock_bytes)
{
	if (memlock_bumped)
		return libbpf_err(-EBUSY);

	memlock_rlim = memlock_bytes;
	return 0;
}
int bump_rlimit_memlock(void)
{
	struct rlimit rlim;

	/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
	if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
		return 0;

	memlock_bumped = true;

	/* zero memlock_rlim disables auto-bumping RLIMIT_MEMLOCK */
	if (memlock_rlim == 0)
		return 0;

	rlim.rlim_cur = rlim.rlim_max = memlock_rlim;
	if (setrlimit(RLIMIT_MEMLOCK, &rlim))
		return -errno;

	return 0;
}
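/* Illustrative caller-side sketch (not part of this file): an application
 * that wants to control the automatic RLIMIT_MEMLOCK bump on pre-5.11
 * kernels can do so before the first map/prog/BTF load. Both calls are
 * public libbpf API; per the check above, a zero argument disables the
 * auto-bump entirely.
 *
 *	libbpf_set_memlock_rlim(0);                 // disable auto-bumping
 *	libbpf_set_memlock_rlim(64 * 1024 * 1024);  // or: bump to exactly 64 MB
 */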
int bpf_map_create(enum bpf_map_type map_type,
		   const char *map_name,
		   __u32 key_size,
		   __u32 value_size,
		   __u32 max_entries,
		   const struct bpf_map_create_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
	union bpf_attr attr;
	int fd;

	bump_rlimit_memlock();

	memset(&attr, 0, attr_sz);

	if (!OPTS_VALID(opts, bpf_map_create_opts))
		return libbpf_err(-EINVAL);

	attr.map_type = map_type;
	if (map_name && kernel_supports(NULL, FEAT_PROG_NAME))
		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;

	attr.btf_fd = OPTS_GET(opts, btf_fd, 0);
	attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
	attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
	attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);

	attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
	attr.map_flags = OPTS_GET(opts, map_flags, 0);
	attr.map_extra = OPTS_GET(opts, map_extra, 0);
	attr.numa_node = OPTS_GET(opts, numa_node, 0);
	attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);

	fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
}
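/* Usage sketch (illustrative; the map name and sizes are made up): create a
 * small BPF_MAP_TYPE_HASH map. On failure the return value is -errno, per
 * libbpf_err_errno() above, so callers can check the result directly.
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "example_map",
 *				    sizeof(__u32), sizeof(__u64), 1024, &opts);
 *	if (map_fd < 0)
 *		return map_fd;
 */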
static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
			__u32 actual_rec_size, __u32 expected_rec_size)
{
	__u64 info_len = (__u64)actual_rec_size * cnt;
	void *info, *nrecord;
	int i;

	info = malloc(info_len);
	if (!info)
		return NULL;

	/* zero out bytes kernel does not understand */
	nrecord = info;
	for (i = 0; i < cnt; i++) {
		memcpy(nrecord, orecord, expected_rec_size);
		memset(nrecord + expected_rec_size, 0,
		       actual_rec_size - expected_rec_size);
		orecord += actual_rec_size;
		nrecord += actual_rec_size;
	}

	return info;
}
int bpf_prog_load(enum bpf_prog_type prog_type,
		  const char *prog_name, const char *license,
		  const struct bpf_insn *insns, size_t insn_cnt,
		  const struct bpf_prog_load_opts *opts)
{
	void *finfo = NULL, *linfo = NULL;
	const char *func_info, *line_info;
	__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
	__u32 func_info_rec_size, line_info_rec_size;
	int fd, attempts;
	union bpf_attr attr;
	char *log_buf;

	bump_rlimit_memlock();

	if (!OPTS_VALID(opts, bpf_prog_load_opts))
		return libbpf_err(-EINVAL);

	attempts = OPTS_GET(opts, attempts, 0);
	if (attempts < 0)
		return libbpf_err(-EINVAL);
	if (attempts == 0)
		attempts = PROG_LOAD_ATTEMPTS;
	memset(&attr, 0, sizeof(attr));

	attr.prog_type = prog_type;
	attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);

	attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
	attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
	attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
	attr.kern_version = OPTS_GET(opts, kern_version, 0);

	if (prog_name && kernel_supports(NULL, FEAT_PROG_NAME))
		libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	attr.license = ptr_to_u64(license);

	if (insn_cnt > UINT_MAX)
		return libbpf_err(-E2BIG);

	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)insn_cnt;

	attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
	attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);

	if (attach_prog_fd && attach_btf_obj_fd)
		return libbpf_err(-EINVAL);

	attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
	if (attach_prog_fd)
		attr.attach_prog_fd = attach_prog_fd;
	else
		attr.attach_btf_obj_fd = attach_btf_obj_fd;

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	if (!!log_buf != !!log_size)
		return libbpf_err(-EINVAL);
	if (log_level > (4 | 2 | 1))
		return libbpf_err(-EINVAL);
	if (log_level && !log_buf)
		return libbpf_err(-EINVAL);
	func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
	func_info = OPTS_GET(opts, func_info, NULL);
	attr.func_info_rec_size = func_info_rec_size;
	attr.func_info = ptr_to_u64(func_info);
	attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0);

	line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
	line_info = OPTS_GET(opts, line_info, NULL);
	attr.line_info_rec_size = line_info_rec_size;
	attr.line_info = ptr_to_u64(line_info);
	attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);

	attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));

	if (log_level) {
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = log_level;
	}

	fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
	if (fd >= 0)
		return fd;
	/* After bpf_prog_load, the kernel may modify certain attributes
	 * to give user space a hint how to deal with loading failure.
	 * Check to see whether we can make some changes and load again.
	 */
	while (errno == E2BIG && (!finfo || !linfo)) {
		if (!finfo && attr.func_info_cnt &&
		    attr.func_info_rec_size < func_info_rec_size) {
			/* try with corrected func info records */
			finfo = alloc_zero_tailing_info(func_info,
							attr.func_info_cnt,
							func_info_rec_size,
							attr.func_info_rec_size);
			if (!finfo) {
				errno = E2BIG;
				goto done;
			}

			attr.func_info = ptr_to_u64(finfo);
			attr.func_info_rec_size = func_info_rec_size;
		} else if (!linfo && attr.line_info_cnt &&
			   attr.line_info_rec_size < line_info_rec_size) {
			linfo = alloc_zero_tailing_info(line_info,
							attr.line_info_cnt,
							line_info_rec_size,
							attr.line_info_rec_size);
			if (!linfo) {
				errno = E2BIG;
				goto done;
			}

			attr.line_info = ptr_to_u64(linfo);
			attr.line_info_rec_size = line_info_rec_size;
		} else {
			break;
		}

		fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
		if (fd >= 0)
			goto done;
	}
	if (log_level == 0 && log_buf) {
		/* log_level == 0 with non-NULL log_buf requires retrying on error
		 * with log_level == 1 and log_buf/log_buf_size set, to get details of
		 * failure
		 */
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = 1;

		fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
	}
done:
	/* free() doesn't affect errno, so we don't need to restore it */
	free(finfo);
	free(linfo);
	return libbpf_err_errno(fd);
}
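/* Usage sketch (illustrative): load a trivial "return 0" socket filter and
 * capture the verifier log on failure. The instructions and buffer size are
 * made up for the example; note that passing log_buf with log_level == 0
 * relies on the automatic log_level=1 retry implemented above.
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	char log[4096];
 *	LIBBPF_OPTS(bpf_prog_load_opts, opts, .log_buf = log, .log_size = sizeof(log));
 *	int prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "example_prog",
 *				    "GPL", insns, ARRAY_SIZE(insns), &opts);
 *	if (prog_fd < 0)
 *		fprintf(stderr, "load failed: %d\n%s\n", prog_fd, log);
 */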
int bpf_map_update_elem(int fd, const void *key, const void *value,
			__u64 flags)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
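/* Usage sketch (illustrative; key/value types must match the map's actual
 * definition): update one element of the hypothetical map_fd created
 * earlier, then read it back.
 *
 *	__u32 key = 1;
 *	__u64 val = 42, out;
 *
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 *	if (bpf_map_lookup_elem(map_fd, &key, &out) == 0)
 *		assert(out == 42);
 */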
int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.value = ptr_to_u64(value);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_map_delete_elem(int fd, const void *key)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.flags = flags;

	ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = ptr_to_u64(key);
	attr.next_key = ptr_to_u64(next_key);

	ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_map_freeze(int fd)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;

	ret = sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
				void *out_batch, void *keys, void *values,
				__u32 *count,
				const struct bpf_map_batch_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_map_batch_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.batch.map_fd = fd;
	attr.batch.in_batch = ptr_to_u64(in_batch);
	attr.batch.out_batch = ptr_to_u64(out_batch);
	attr.batch.keys = ptr_to_u64(keys);
	attr.batch.values = ptr_to_u64(values);
	attr.batch.count = *count;
	attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0);
	attr.batch.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(cmd, &attr, sizeof(attr));
	*count = attr.batch.count;

	return libbpf_err_errno(ret);
}
int bpf_map_delete_batch(int fd, const void *keys, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
				    NULL, (void *)keys, NULL, count, opts);
}

int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
			 void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
				    out_batch, keys, values, count, opts);
}

int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
				    void *keys, void *values, __u32 *count,
				    const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
				    fd, in_batch, out_batch, keys, values,
				    count, opts);
}

int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *count,
			 const struct bpf_map_batch_opts *opts)
{
	return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
				    (void *)keys, (void *)values, count, opts);
}
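/* Usage sketch (illustrative): drain one chunk of a hash map with
 * bpf_map_lookup_and_delete_batch(). A NULL in_batch starts from the
 * beginning; out_batch receives an opaque cursor for the next call and
 * *count is in/out. The __u32 cursor and key/value types are assumptions
 * matching the earlier example map.
 *
 *	__u32 out_batch, count = 64;
 *	__u32 keys[64];
 *	__u64 vals[64];
 *	LIBBPF_OPTS(bpf_map_batch_opts, bopts);
 *
 *	int err = bpf_map_lookup_and_delete_batch(map_fd, NULL, &out_batch,
 *						  keys, vals, &count, &bopts);
 *	// on success (or -ENOENT at the end), count holds elements returned
 */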
int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);
	attr.bpf_fd = fd;

	ret = sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.pathname = ptr_to_u64((void *)pathname);

	fd = sys_bpf_fd(BPF_OBJ_GET, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
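/* Usage sketch (illustrative; the pin path is made up): persist an fd in
 * bpffs and re-open it later, e.g. from another process.
 *
 *	bpf_obj_pin(map_fd, "/sys/fs/bpf/example_map");
 *	...
 *	int fd2 = bpf_obj_get("/sys/fs/bpf/example_map");
 */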
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
		    unsigned int flags)
{
	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
		.flags = flags,
	);

	return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts);
}
int bpf_prog_attach_opts(int prog_fd, int target_fd,
			 enum bpf_attach_type type,
			 const struct bpf_prog_attach_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_attach_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;
	attr.attach_flags = OPTS_GET(opts, flags, 0);
	attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);

	ret = sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
__attribute__((alias("bpf_prog_attach_opts")))
int bpf_prog_attach_xattr(int prog_fd, int target_fd,
			  enum bpf_attach_type type,
			  const struct bpf_prog_attach_opts *opts);
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_type = type;

	ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	ret = sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
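/* Usage sketch (illustrative; cgroup_fd is assumed to be an open cgroup v2
 * directory fd): attach a cgroup-skb program on ingress, later detach it.
 *
 *	bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_INGRESS, 0);
 *	...
 *	bpf_prog_detach2(prog_fd, cgroup_fd, BPF_CGROUP_INET_INGRESS);
 */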
int bpf_link_create(int prog_fd, int target_fd,
		    enum bpf_attach_type attach_type,
		    const struct bpf_link_create_opts *opts)
{
	__u32 target_btf_id, iter_info_len;
	union bpf_attr attr;
	int fd, err;

	if (!OPTS_VALID(opts, bpf_link_create_opts))
		return libbpf_err(-EINVAL);

	iter_info_len = OPTS_GET(opts, iter_info_len, 0);
	target_btf_id = OPTS_GET(opts, target_btf_id, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (iter_info_len || target_btf_id) {
		if (iter_info_len && target_btf_id)
			return libbpf_err(-EINVAL);
		if (!OPTS_ZEROED(opts, target_btf_id))
			return libbpf_err(-EINVAL);
	}
	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;
	attr.link_create.flags = OPTS_GET(opts, flags, 0);

	if (target_btf_id) {
		attr.link_create.target_btf_id = target_btf_id;
		goto proceed;
	}
	switch (attach_type) {
	case BPF_TRACE_ITER:
		attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
		attr.link_create.iter_info_len = iter_info_len;
		break;
	case BPF_PERF_EVENT:
		attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0);
		if (!OPTS_ZEROED(opts, perf_event))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_KPROBE_MULTI:
		attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0);
		attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0);
		attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0));
		attr.link_create.kprobe_multi.addrs = ptr_to_u64(OPTS_GET(opts, kprobe_multi.addrs, 0));
		attr.link_create.kprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, kprobe_multi.cookies, 0));
		if (!OPTS_ZEROED(opts, kprobe_multi))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
	case BPF_LSM_MAC:
		attr.link_create.tracing.cookie = OPTS_GET(opts, tracing.cookie, 0);
		if (!OPTS_ZEROED(opts, tracing))
			return libbpf_err(-EINVAL);
		break;
	default:
		if (!OPTS_ZEROED(opts, flags))
			return libbpf_err(-EINVAL);
		break;
	}
proceed:
	fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr));
	if (fd >= 0)
		return fd;
	/* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry
	 * and other similar programs
	 */
	err = -errno;
	if (err != -EINVAL)
		return libbpf_err(err);

	/* if user used features not supported by
	 * BPF_RAW_TRACEPOINT_OPEN command, then just give up immediately
	 */
	if (attr.link_create.target_fd || attr.link_create.target_btf_id)
		return libbpf_err(err);
	if (!OPTS_ZEROED(opts, sz))
		return libbpf_err(err);

	/* otherwise, for few select kinds of programs that can be
	 * attached using BPF_RAW_TRACEPOINT_OPEN command, try that as
	 * a fallback for older kernels
	 */
	switch (attach_type) {
	case BPF_TRACE_RAW_TP:
	case BPF_LSM_MAC:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
		return bpf_raw_tracepoint_open(NULL, prog_fd);
	default:
		return libbpf_err(err);
	}
}
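/* Usage sketch (illustrative): attach an already-loaded fentry program via
 * BPF_LINK_CREATE. tracing_prog_fd is assumed to be a loaded
 * BPF_PROG_TYPE_TRACING program. With no opts set beyond defaults, older
 * kernels transparently fall back to BPF_RAW_TRACEPOINT_OPEN, per the code
 * above.
 *
 *	int link_fd = bpf_link_create(tracing_prog_fd, 0, BPF_TRACE_FENTRY, NULL);
 *	if (link_fd < 0)
 *		return link_fd;
 */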
int bpf_link_detach(int link_fd)
{
	union bpf_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.link_detach.link_fd = link_fd;

	ret = sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_link_update(int link_fd, int new_prog_fd,
		    const struct bpf_link_update_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_link_update_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_prog_fd = new_prog_fd;
	attr.link_update.flags = OPTS_GET(opts, flags, 0);
	attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);

	ret = sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}
int bpf_iter_create(int link_fd)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.iter_create.link_fd = link_fd;

	fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
int bpf_prog_query_opts(int target_fd,
			enum bpf_attach_type type,
			struct bpf_prog_query_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_query_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));

	attr.query.target_fd = target_fd;
	attr.query.attach_type = type;
	attr.query.query_flags = OPTS_GET(opts, query_flags, 0);
	attr.query.prog_cnt = OPTS_GET(opts, prog_cnt, 0);
	attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
	attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));

	ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));

	OPTS_SET(opts, attach_flags, attr.query.attach_flags);
	OPTS_SET(opts, prog_cnt, attr.query.prog_cnt);

	return libbpf_err_errno(ret);
}
int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
		   __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
	LIBBPF_OPTS(bpf_prog_query_opts, opts);
	int ret;

	opts.query_flags = query_flags;
	opts.prog_ids = prog_ids;
	opts.prog_cnt = *prog_cnt;

	ret = bpf_prog_query_opts(target_fd, type, &opts);

	if (attach_flags)
		*attach_flags = opts.attach_flags;
	*prog_cnt = opts.prog_cnt;

	return libbpf_err_errno(ret);
}
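/* Usage sketch (illustrative): list programs attached to a cgroup.
 * cgroup_fd is an assumption, as before; 16 is an arbitrary array size.
 *
 *	__u32 ids[16], flags = 0, cnt = 16;
 *
 *	if (!bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_INGRESS, 0,
 *			    &flags, ids, &cnt))
 *		printf("%u programs attached\n", cnt);
 */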
int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_test_run_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.batch_size = OPTS_GET(opts, batch_size, 0);
	attr.test.cpu = OPTS_GET(opts, cpu, 0);
	attr.test.flags = OPTS_GET(opts, flags, 0);
	attr.test.repeat = OPTS_GET(opts, repeat, 0);
	attr.test.duration = OPTS_GET(opts, duration, 0);
	attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0);
	attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0);
	attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0);
	attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0);
	attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL));
	attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL));
	attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
	attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));

	ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));

	OPTS_SET(opts, data_size_out, attr.test.data_size_out);
	OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
	OPTS_SET(opts, duration, attr.test.duration);
	OPTS_SET(opts, retval, attr.test.retval);

	return libbpf_err_errno(ret);
}
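/* Usage sketch (illustrative): single test run of the earlier socket filter
 * against a dummy packet. The 64-byte zeroed payload is made up; real
 * programs typically want a valid Ethernet frame in data_in.
 *
 *	char pkt_in[64] = {}, pkt_out[64];
 *	LIBBPF_OPTS(bpf_test_run_opts, topts,
 *		.data_in = pkt_in, .data_size_in = sizeof(pkt_in),
 *		.data_out = pkt_out, .data_size_out = sizeof(pkt_out),
 *		.repeat = 1,
 *	);
 *
 *	if (!bpf_prog_test_run_opts(prog_fd, &topts))
 *		printf("retval=%u duration=%uns\n", topts.retval, topts.duration);
 */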
static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.start_id = start_id;

	err = sys_bpf(cmd, &attr, sizeof(attr));
	if (!err)
		*next_id = attr.next_id;

	return libbpf_err_errno(err);
}
int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
}

int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
}

int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}

int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
{
	return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
}
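/* Usage sketch (illustrative): walk all BPF program IDs loaded in the
 * system. Iteration ends when the kernel reports -ENOENT.
 *
 *	__u32 id = 0;
 *
 *	while (!bpf_prog_get_next_id(id, &id)) {
 *		int fd = bpf_prog_get_fd_by_id(id);
 *		// ... inspect fd, e.g. via bpf_obj_get_info_by_fd(), then close()
 *	}
 */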
int bpf_prog_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.prog_id = id;

	fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
int bpf_map_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.map_id = id;

	fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
int bpf_btf_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.btf_id = id;

	fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
int bpf_link_get_fd_by_id(__u32 id)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.link_id = id;

	fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
{
	union bpf_attr attr;
	int err;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = bpf_fd;
	attr.info.info_len = *info_len;
	attr.info.info = ptr_to_u64(info);

	err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!err)
		*info_len = attr.info.info_len;

	return libbpf_err_errno(err);
}
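/* Usage sketch (illustrative): fetch metadata about a program fd. The
 * kernel fills in as much of bpf_prog_info as it knows and updates len.
 *
 *	struct bpf_prog_info info = {};
 *	__u32 len = sizeof(info);
 *
 *	if (!bpf_obj_get_info_by_fd(prog_fd, &info, &len))
 *		printf("id=%u name=%s\n", info.id, info.name);
 */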
int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.raw_tracepoint.name = ptr_to_u64(name);
	attr.raw_tracepoint.prog_fd = prog_fd;

	fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_load_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, btf_log_level);
	union bpf_attr attr;
	char *log_buf;
	size_t log_size;
	__u32 log_level;
	int fd;

	bump_rlimit_memlock();

	memset(&attr, 0, attr_sz);

	if (!OPTS_VALID(opts, bpf_btf_load_opts))
		return libbpf_err(-EINVAL);

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	if (log_size > UINT_MAX)
		return libbpf_err(-EINVAL);
	if (log_size && !log_buf)
		return libbpf_err(-EINVAL);

	attr.btf = ptr_to_u64(btf_data);
	attr.btf_size = btf_size;
	/* log_level == 0 and log_buf != NULL means "try loading without
	 * log_buf, but retry with log_buf and log_level=1 on error", which is
	 * consistent across low-level and high-level BTF and program loading
	 * APIs within libbpf and provides a sensible behavior in practice
	 */
	if (log_level) {
		attr.btf_log_buf = ptr_to_u64(log_buf);
		attr.btf_log_size = (__u32)log_size;
		attr.btf_log_level = log_level;
	}

	fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
	if (fd < 0 && log_buf && log_level == 0) {
		attr.btf_log_buf = ptr_to_u64(log_buf);
		attr.btf_log_size = (__u32)log_size;
		attr.btf_log_level = 1;
		fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
	}
	return libbpf_err_errno(fd);
}
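/* Usage sketch (illustrative): load raw BTF bytes (assumed here to come
 * from an ELF .BTF section) with log capture on failure, relying on the
 * same log_level=1 retry convention as bpf_prog_load() above.
 *
 *	char log[4096];
 *	LIBBPF_OPTS(bpf_btf_load_opts, opts, .log_buf = log, .log_size = sizeof(log));
 *	int btf_fd = bpf_btf_load(btf_data, btf_data_sz, &opts);
 *	if (btf_fd < 0)
 *		fprintf(stderr, "BTF load: %d\n%s\n", btf_fd, log);
 */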
int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
		      __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
		      __u64 *probe_addr)
{
	union bpf_attr attr = {};
	int err;

	attr.task_fd_query.pid = pid;
	attr.task_fd_query.fd = fd;
	attr.task_fd_query.flags = flags;
	attr.task_fd_query.buf = ptr_to_u64(buf);
	attr.task_fd_query.buf_len = *buf_len;

	err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));

	*buf_len = attr.task_fd_query.buf_len;
	*prog_id = attr.task_fd_query.prog_id;
	*fd_type = attr.task_fd_query.fd_type;
	*probe_offset = attr.task_fd_query.probe_offset;
	*probe_addr = attr.task_fd_query.probe_addr;

	return libbpf_err_errno(err);
}
int bpf_enable_stats(enum bpf_stats_type type)
{
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.enable_stats.type = type;

	fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, sizeof(attr));
	return libbpf_err_errno(fd);
}
int bpf_prog_bind_map(int prog_fd, int map_fd,
		      const struct bpf_prog_bind_opts *opts)
{
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_prog_bind_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, sizeof(attr));
	attr.prog_bind_map.prog_fd = prog_fd;
	attr.prog_bind_map.map_fd = map_fd;
	attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
	return libbpf_err_errno(ret);
}