1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
4 * Common eBPF ELF object loading operations.
8 * Copyright (C) 2015 Huawei Inc.
9 * Copyright (C) 2017 Nicira, Inc.
22 #include <asm/unistd.h>
23 #include <linux/err.h>
24 #include <linux/kernel.h>
25 #include <linux/bpf.h>
26 #include <linux/btf.h>
27 #include <linux/list.h>
28 #include <linux/limits.h>
29 #include <linux/perf_event.h>
30 #include <linux/ring_buffer.h>
32 #include <sys/types.h>
34 #include <tools/libc_compat.h>
41 #include "str_error.h"
48 #define BPF_FS_MAGIC 0xcafe4a11
51 #define __printf(a, b) __attribute__((format(printf, a, b)))
/* Default print callback: forwards the formatted message to stderr. */
54 static int __base_pr(const char *format, ...)
59 va_start(args, format);
60 err = vfprintf(stderr, format, args);
/*
 * Active print callbacks. Warning and info default to stderr via
 * __base_pr; debug defaults to NULL, i.e. debug output is off until
 * libbpf_set_print() installs a handler.
 */
65 static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
66 static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
67 static __printf(1, 2) libbpf_print_fn_t __pr_debug;
69 #define __pr(func, fmt, ...) \
72 (func)("libbpf: " fmt, ##__VA_ARGS__); \
75 #define pr_warning(fmt, ...) __pr(__pr_warning, fmt, ##__VA_ARGS__)
76 #define pr_info(fmt, ...) __pr(__pr_info, fmt, ##__VA_ARGS__)
77 #define pr_debug(fmt, ...) __pr(__pr_debug, fmt, ##__VA_ARGS__)
/*
 * Install caller-supplied print callbacks for the warning, info and
 * debug channels used by the pr_warning/pr_info/pr_debug macros.
 */
79 void libbpf_set_print(libbpf_print_fn_t warn,
80 libbpf_print_fn_t info,
81 libbpf_print_fn_t debug)
88 #define STRERR_BUFSIZE 128
90 #define CHECK_ERR(action, err, out) do { \
97 /* Copied from tools/perf/util/util.h */
99 # define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
103 # define zclose(fd) ({ \
106 ___err = close((fd)); \
111 #ifdef HAVE_LIBELF_MMAP_SUPPORT
112 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
114 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
118 * bpf_prog should be a better name but it has been used in
122 /* Index in elf obj file, for relocation use. */
/* Instruction buffer copied out of the ELF section, plus its length. */
127 struct bpf_insn *insns;
128 size_t insns_cnt, main_prog_cnt;
129 enum bpf_prog_type type;
148 bpf_program_prep_t preprocessor;
/* Back-pointer to the owning object. */
150 struct bpf_object *obj;
152 bpf_program_clear_priv_t clear_priv;
154 enum bpf_attach_type expected_attach_type;
/* Map definition plus BTF key/value type ids resolved from .BTF. */
162 struct bpf_map_def def;
163 __u32 btf_key_type_id;
164 __u32 btf_value_type_id;
166 bpf_map_clear_priv_t clear_priv;
/* Global list linking every open bpf_object (see bpf_object__new). */
169 static LIST_HEAD(bpf_objects_list);
175 struct bpf_program *programs;
177 struct bpf_map *maps;
181 bool has_pseudo_calls;
184 * Information when doing elf related work. Only valid if fd
204 * All loaded bpf_object is linked in a list, which is
205 * hidden to caller. bpf_objects__<func> handlers deal with
208 struct list_head list;
213 bpf_object_clear_priv_t clear_priv;
/* True while the ELF handle is open (between elf_init and elf_finish). */
217 #define obj_elf_valid(o) ((o)->efile.elf)
/*
 * Close every loaded instance fd of @prog and return it to the
 * "never loaded" state (instances.nr == -1, fds freed).
 */
219 void bpf_program__unload(struct bpf_program *prog)
227 * If the object is opened but the program was never loaded,
228 * it is possible that prog->instances.nr == -1.
230 if (prog->instances.nr > 0) {
231 for (i = 0; i < prog->instances.nr; i++)
232 zclose(prog->instances.fds[i]);
/* nr < -1 or an unexpected 0-with-fds state indicates internal corruption. */
233 } else if (prog->instances.nr != -1) {
234 pr_warning("Internal error: instances.nr is %d\n",
238 prog->instances.nr = -1;
239 zfree(&prog->instances.fds);
/*
 * Tear down @prog completely: run the private-data destructor if one
 * was registered, unload all instances, then free owned buffers.
 */
242 static void bpf_program__exit(struct bpf_program *prog)
247 if (prog->clear_priv)
248 prog->clear_priv(prog, prog->priv);
251 prog->clear_priv = NULL;
253 bpf_program__unload(prog);
255 zfree(&prog->section_name);
257 zfree(&prog->reloc_desc);
/*
 * Initialize @prog from raw section bytes: duplicate the section name,
 * copy the instructions, and default the type to BPF_PROG_TYPE_KPROBE.
 * On any failure the partially built prog is torn down via
 * bpf_program__exit().
 */
265 bpf_program__init(void *data, size_t size, char *section_name, int idx,
266 struct bpf_program *prog)
/* A section smaller than one instruction cannot be a valid program. */
268 if (size < sizeof(struct bpf_insn)) {
269 pr_warning("corrupted section '%s'\n", section_name);
273 bzero(prog, sizeof(*prog));
275 prog->section_name = strdup(section_name);
276 if (!prog->section_name) {
277 pr_warning("failed to alloc name for prog under section(%d) %s\n",
282 prog->insns = malloc(size);
284 pr_warning("failed to alloc insns for prog under section %s\n",
288 prog->insns_cnt = size / sizeof(struct bpf_insn);
289 memcpy(prog->insns, data,
290 prog->insns_cnt * sizeof(struct bpf_insn));
/* nr == -1 marks "never loaded"; see bpf_program__unload(). */
292 prog->instances.fds = NULL;
293 prog->instances.nr = -1;
294 prog->type = BPF_PROG_TYPE_KPROBE;
298 bpf_program__exit(prog);
/*
 * Build a bpf_program from section bytes and append it to
 * obj->programs, growing the array with reallocarray().
 */
303 bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
304 char *section_name, int idx)
306 struct bpf_program prog, *progs;
309 err = bpf_program__init(data, size, section_name, idx, &prog);
313 progs = obj->programs;
314 nr_progs = obj->nr_programs;
316 progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
319 * In this case the original obj->programs
320 * is still valid, so don't need special treat for
321 * bpf_close_object().
323 pr_warning("failed to alloc a new program under section '%s'\n",
/* Release the locally built prog; the object keeps its old array. */
325 bpf_program__exit(&prog);
329 pr_debug("found program %s\n", prog.section_name);
330 obj->programs = progs;
331 obj->nr_programs = nr_progs + 1;
/* Shallow copy: ownership of prog's buffers moves into the array slot. */
333 progs[nr_progs] = prog;
/*
 * Resolve each program's name by scanning the symbol table for a
 * GLOBAL symbol defined in that program's section. Programs in .text
 * fall back to the name ".text" (see line 370 context) when no global
 * symbol is found.
 */
338 bpf_object__init_prog_names(struct bpf_object *obj)
340 Elf_Data *symbols = obj->efile.symbols;
341 struct bpf_program *prog;
344 for (pi = 0; pi < obj->nr_programs; pi++) {
345 const char *name = NULL;
347 prog = &obj->programs[pi];
/* Stop at the first matching global symbol in this prog's section. */
349 for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
353 if (!gelf_getsym(symbols, si, &sym))
355 if (sym.st_shndx != prog->idx)
357 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
360 name = elf_strptr(obj->efile.elf,
361 obj->efile.strtabidx,
364 pr_warning("failed to get sym name string for prog %s\n",
366 return -LIBBPF_ERRNO__LIBELF;
370 if (!name && prog->idx == obj->efile.text_shndx)
374 pr_warning("failed to find sym for prog %s\n",
379 prog->name = strdup(name);
381 pr_warning("failed to allocate memory for prog sym %s\n",
/*
 * Allocate a bpf_object with @path stored in its trailing flexible
 * buffer (allocation is sizeof(obj) + strlen(path) + 1) and link it
 * into the global bpf_objects_list.
 */
390 static struct bpf_object *bpf_object__new(const char *path,
394 struct bpf_object *obj;
396 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
398 pr_warning("alloc memory failed for %s\n", path);
399 return ERR_PTR(-ENOMEM);
402 strcpy(obj->path, path);
406 * Caller of this function should also calls
407 * bpf_object__elf_finish() after data collection to return
408 * obj_buf to user. If not, we should duplicate the buffer to
409 * avoid user freeing them before elf finish.
411 obj->efile.obj_buf = obj_buf;
412 obj->efile.obj_buf_sz = obj_buf_sz;
/* -1 = no "maps" section discovered yet. */
413 obj->efile.maps_shndx = -1;
417 INIT_LIST_HEAD(&obj->list);
418 list_add(&obj->list, &bpf_objects_list);
/*
 * Release all ELF-parsing state: end the libelf handle, drop cached
 * symbol/reloc data, close the file descriptor and forget the caller's
 * buffer. Safe to call when nothing is open (obj_elf_valid check).
 */
422 static void bpf_object__elf_finish(struct bpf_object *obj)
424 if (!obj_elf_valid(obj))
427 if (obj->efile.elf) {
428 elf_end(obj->efile.elf);
429 obj->efile.elf = NULL;
431 obj->efile.symbols = NULL;
433 zfree(&obj->efile.reloc);
434 obj->efile.nr_reloc = 0;
435 zclose(obj->efile.fd);
/* obj_buf is owned by the caller; just drop the reference. */
436 obj->efile.obj_buf = NULL;
437 obj->efile.obj_buf_sz = 0;
/*
 * Open the object as ELF: either from the in-memory buffer supplied at
 * open time (elf_memory) or by opening obj->path (elf_begin). Validates
 * the EHDR: must be ET_REL and, when e_machine is set, EM_BPF.
 * On error all ELF state is released via bpf_object__elf_finish().
 */
440 static int bpf_object__elf_init(struct bpf_object *obj)
445 if (obj_elf_valid(obj)) {
446 pr_warning("elf init: internal error\n");
447 return -LIBBPF_ERRNO__LIBELF;
450 if (obj->efile.obj_buf_sz > 0) {
452 * obj_buf should have been validated by
453 * bpf_object__open_buffer().
455 obj->efile.elf = elf_memory(obj->efile.obj_buf,
456 obj->efile.obj_buf_sz);
458 obj->efile.fd = open(obj->path, O_RDONLY);
459 if (obj->efile.fd < 0) {
460 char errmsg[STRERR_BUFSIZE];
461 char *cp = libbpf_strerror_r(errno, errmsg,
464 pr_warning("failed to open %s: %s\n", obj->path, cp);
468 obj->efile.elf = elf_begin(obj->efile.fd,
469 LIBBPF_ELF_C_READ_MMAP,
473 if (!obj->efile.elf) {
474 pr_warning("failed to open %s as ELF file\n",
476 err = -LIBBPF_ERRNO__LIBELF;
480 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
481 pr_warning("failed to get EHDR from %s\n",
483 err = -LIBBPF_ERRNO__FORMAT;
486 ep = &obj->efile.ehdr;
488 /* Old LLVM set e_machine to EM_NONE */
489 if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
490 pr_warning("%s is not an eBPF object file\n",
492 err = -LIBBPF_ERRNO__FORMAT;
498 bpf_object__elf_finish(obj);
/*
 * Verify the ELF's EI_DATA byte order matches the host's, detected at
 * runtime by inspecting the first byte of a host-order integer 1.
 * Returns -LIBBPF_ERRNO__ENDIAN on mismatch or unknown encoding.
 */
503 bpf_object__check_endianness(struct bpf_object *obj)
505 static unsigned int const endian = 1;
507 switch (obj->efile.ehdr.e_ident[EI_DATA]) {
509 /* We are big endian, BPF obj is little endian. */
510 if (*(unsigned char const *)&endian != 1)
515 /* We are little endian, BPF obj is big endian. */
516 if (*(unsigned char const *)&endian != 0)
520 return -LIBBPF_ERRNO__ENDIAN;
526 pr_warning("Error: endianness mismatch.\n");
527 return -LIBBPF_ERRNO__ENDIAN;
/*
 * Copy the "license" section into obj->license, truncating to leave
 * room for the NUL terminator.
 */
531 bpf_object__init_license(struct bpf_object *obj,
532 void *data, size_t size)
534 memcpy(obj->license, data,
535 min(size, sizeof(obj->license) - 1))
536 pr_debug("license of %s is %s\n", obj->path, obj->license);
/*
 * Parse the "version" section: must be exactly one __u32 kernel
 * version, stored into obj->kern_version.
 */
541 bpf_object__init_kversion(struct bpf_object *obj,
542 void *data, size_t size)
546 if (size != sizeof(kver)) {
547 pr_warning("invalid kver section in %s\n", obj->path);
548 return -LIBBPF_ERRNO__FORMAT;
550 memcpy(&kver, data, sizeof(kver));
551 obj->kern_version = kver;
552 pr_debug("kernel version of %s is %x\n", obj->path,
/*
 * qsort comparator ordering maps by their offset within the "maps"
 * section (see bpf_object__init_maps).
 */
557 static int compare_bpf_map(const void *_a, const void *_b)
559 const struct bpf_map *a = _a;
560 const struct bpf_map *b = _b;
562 return a->offset - b->offset;
/*
 * Parse the "maps" ELF section into obj->maps. The map count is the
 * number of symbols pointing into the section; all definitions are
 * assumed equally sized (d_size / nr_maps). Definitions larger than
 * struct bpf_map_def are accepted only if the excess bytes are all
 * zero (or MAPS_RELAX_COMPAT was requested via @flags). The resulting
 * array is sorted by section offset.
 */
566 bpf_object__init_maps(struct bpf_object *obj, int flags)
568 bool strict = !(flags & MAPS_RELAX_COMPAT);
569 int i, map_idx, map_def_sz, nr_maps = 0;
572 Elf_Data *symbols = obj->efile.symbols;
574 if (obj->efile.maps_shndx < 0)
579 scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
581 data = elf_getdata(scn, NULL);
583 pr_warning("failed to get Elf_Data from map section %d\n",
584 obj->efile.maps_shndx);
589 * Count number of maps. Each map has a name.
590 * Array of maps is not supported: only the first element is
593 * TODO: Detect array of map and report error.
595 for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
598 if (!gelf_getsym(symbols, i, &sym))
600 if (sym.st_shndx != obj->efile.maps_shndx)
605 /* Alloc obj->maps and fill nr_maps. */
606 pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
607 nr_maps, data->d_size);
612 /* Assume equally sized map definitions */
613 map_def_sz = data->d_size / nr_maps;
614 if (!data->d_size || (data->d_size % nr_maps) != 0) {
615 pr_warning("unable to determine map definition size "
616 "section %s, %d maps in %zd bytes\n",
617 obj->path, nr_maps, data->d_size);
621 obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
623 pr_warning("alloc maps for object failed\n");
626 obj->nr_maps = nr_maps;
629 * fill all fd with -1 so won't close incorrect
630 * fd (fd=0 is stdin) when failure (zclose won't close
633 for (i = 0; i < nr_maps; i++)
634 obj->maps[i].fd = -1;
637 * Fill obj->maps using data in "maps" section.
639 for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
641 const char *map_name;
642 struct bpf_map_def *def;
644 if (!gelf_getsym(symbols, i, &sym))
646 if (sym.st_shndx != obj->efile.maps_shndx)
649 map_name = elf_strptr(obj->efile.elf,
650 obj->efile.strtabidx,
652 obj->maps[map_idx].offset = sym.st_value;
/* The definition must fit entirely inside the section data. */
653 if (sym.st_value + map_def_sz > data->d_size) {
654 pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
655 obj->path, map_name);
659 obj->maps[map_idx].name = strdup(map_name);
660 if (!obj->maps[map_idx].name) {
661 pr_warning("failed to alloc map name\n");
664 pr_debug("map %d is \"%s\"\n", map_idx,
665 obj->maps[map_idx].name);
666 def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
668 * If the definition of the map in the object file fits in
669 * bpf_map_def, copy it. Any extra fields in our version
670 * of bpf_map_def will default to zero as a result of the
673 if (map_def_sz <= sizeof(struct bpf_map_def)) {
674 memcpy(&obj->maps[map_idx].def, def, map_def_sz);
677 * Here the map structure being read is bigger than what
678 * we expect, truncate if the excess bits are all zero.
679 * If they are not zero, reject this map as
683 for (b = ((char *)def) + sizeof(struct bpf_map_def);
684 b < ((char *)def) + map_def_sz; b++) {
686 pr_warning("maps section in %s: \"%s\" "
687 "has unrecognized, non-zero "
689 obj->path, map_name);
694 memcpy(&obj->maps[map_idx].def, def,
695 sizeof(struct bpf_map_def));
700 qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
/*
 * Return true when section @idx carries executable instructions
 * (SHF_EXECINSTR set in its header), i.e. it can contain BPF code
 * worth relocating.
 */
704 static bool section_have_execinstr(struct bpf_object *obj, int idx)
709 scn = elf_getscn(obj->efile.elf, idx);
713 if (gelf_getshdr(scn, &sh) != &sh)
716 if (sh.sh_flags & SHF_EXECINSTR)
/*
 * Walk every section of the ELF object and dispatch by name/type:
 * "license" and "version" are parsed immediately, "maps" and .BTF are
 * recorded for later, SYMTAB is cached, executable PROGBITS sections
 * become bpf_programs, and SHT_REL sections against executable code
 * are queued in obj->efile.reloc for bpf_object__collect_reloc().
 * Finishes by validating strtabidx and initializing maps/prog names.
 *
 * Fix: the invalid-strtab path returned a *positive* LIBBPF_ERRNO__FORMAT,
 * unlike every other error path in this function (-LIBBPF_ERRNO__FORMAT);
 * callers checking err < 0 and libbpf's errno mapping expect negatives.
 */
722 static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
724 Elf *elf = obj->efile.elf;
725 GElf_Ehdr *ep = &obj->efile.ehdr;
727 int idx = 0, err = 0;
729 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
730 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
731 pr_warning("failed to get e_shstrndx from %s\n",
733 return -LIBBPF_ERRNO__FORMAT;
736 while ((scn = elf_nextscn(elf, scn)) != NULL) {
742 if (gelf_getshdr(scn, &sh) != &sh) {
743 pr_warning("failed to get section(%d) header from %s\n",
745 err = -LIBBPF_ERRNO__FORMAT;
749 name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
751 pr_warning("failed to get section(%d) name from %s\n",
753 err = -LIBBPF_ERRNO__FORMAT;
757 data = elf_getdata(scn, 0);
759 pr_warning("failed to get section(%d) data from %s(%s)\n",
760 idx, name, obj->path);
761 err = -LIBBPF_ERRNO__FORMAT;
764 pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
765 idx, name, (unsigned long)data->d_size,
766 (int)sh.sh_link, (unsigned long)sh.sh_flags,
769 if (strcmp(name, "license") == 0)
770 err = bpf_object__init_license(obj,
773 else if (strcmp(name, "version") == 0)
774 err = bpf_object__init_kversion(obj,
777 else if (strcmp(name, "maps") == 0)
778 obj->efile.maps_shndx = idx;
779 else if (strcmp(name, BTF_ELF_SEC) == 0) {
780 obj->btf = btf__new(data->d_buf, data->d_size,
/* BTF is optional: failure to parse it is logged, not fatal. */
782 if (IS_ERR(obj->btf)) {
783 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
784 BTF_ELF_SEC, PTR_ERR(obj->btf));
787 } else if (sh.sh_type == SHT_SYMTAB) {
788 if (obj->efile.symbols) {
789 pr_warning("bpf: multiple SYMTAB in %s\n",
791 err = -LIBBPF_ERRNO__FORMAT;
793 obj->efile.symbols = data;
794 obj->efile.strtabidx = sh.sh_link;
796 } else if ((sh.sh_type == SHT_PROGBITS) &&
797 (sh.sh_flags & SHF_EXECINSTR) &&
798 (data->d_size > 0)) {
799 if (strcmp(name, ".text") == 0)
800 obj->efile.text_shndx = idx;
801 err = bpf_object__add_program(obj, data->d_buf,
802 data->d_size, name, idx);
804 char errmsg[STRERR_BUFSIZE];
805 char *cp = libbpf_strerror_r(-err, errmsg,
808 pr_warning("failed to alloc program %s (%s): %s",
809 name, obj->path, cp);
811 } else if (sh.sh_type == SHT_REL) {
812 void *reloc = obj->efile.reloc;
813 int nr_reloc = obj->efile.nr_reloc + 1;
814 int sec = sh.sh_info; /* points to other section */
816 /* Only do relo for section with exec instructions */
817 if (!section_have_execinstr(obj, sec)) {
818 pr_debug("skip relo %s(%d) for section(%d)\n",
823 reloc = reallocarray(reloc, nr_reloc,
824 sizeof(*obj->efile.reloc));
826 pr_warning("realloc failed\n");
829 int n = nr_reloc - 1;
831 obj->efile.reloc = reloc;
832 obj->efile.nr_reloc = nr_reloc;
834 obj->efile.reloc[n].shdr = sh;
835 obj->efile.reloc[n].data = data;
838 pr_debug("skip section(%d) %s\n", idx, name);
/* strtabidx must reference a real section below the section count. */
844 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
845 pr_warning("Corrupted ELF file: index of strtab invalid\n");
846 return -LIBBPF_ERRNO__FORMAT;
848 if (obj->efile.maps_shndx >= 0) {
849 err = bpf_object__init_maps(obj, flags);
853 err = bpf_object__init_prog_names(obj);
/*
 * Linear search for the program whose ELF section index equals @idx.
 */
858 static struct bpf_program *
859 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
861 struct bpf_program *prog;
864 for (i = 0; i < obj->nr_programs; i++) {
865 prog = &obj->programs[i];
866 if (prog->idx == idx)
/*
 * Linear search for the program whose section name matches @title.
 */
873 bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
875 struct bpf_program *pos;
877 bpf_object__for_each_program(pos, obj) {
878 if (pos->section_name && !strcmp(pos->section_name, title))
/*
 * Decode one SHT_REL section for @prog into prog->reloc_desc entries.
 * Each relocation is classified as either RELO_CALL (a BPF_PSEUDO_CALL
 * into .text — records the target offset and marks the object as
 * having pseudo calls) or RELO_LD64 (a BPF_LD_IMM64 map load — records
 * which map, matched by section offset). Anything else is rejected
 * with -LIBBPF_ERRNO__RELOC.
 */
885 bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
886 Elf_Data *data, struct bpf_object *obj)
888 Elf_Data *symbols = obj->efile.symbols;
889 int text_shndx = obj->efile.text_shndx;
890 int maps_shndx = obj->efile.maps_shndx;
891 struct bpf_map *maps = obj->maps;
892 size_t nr_maps = obj->nr_maps;
895 pr_debug("collecting relocating info for: '%s'\n",
897 nrels = shdr->sh_size / shdr->sh_entsize;
899 prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
900 if (!prog->reloc_desc) {
901 pr_warning("failed to alloc memory in relocation\n");
904 prog->nr_reloc = nrels;
906 for (i = 0; i < nrels; i++) {
909 unsigned int insn_idx;
910 struct bpf_insn *insns = prog->insns;
913 if (!gelf_getrel(data, i, &rel)) {
914 pr_warning("relocation: failed to get %d reloc\n", i);
915 return -LIBBPF_ERRNO__FORMAT;
918 if (!gelf_getsym(symbols,
919 GELF_R_SYM(rel.r_info),
921 pr_warning("relocation: symbol %"PRIx64" not found\n",
922 GELF_R_SYM(rel.r_info));
923 return -LIBBPF_ERRNO__FORMAT;
925 pr_debug("relo for %lld value %lld name %d\n",
926 (long long) (rel.r_info >> 32),
927 (long long) sym.st_value, sym.st_name);
/* Only relocations against the maps section or .text are supported. */
929 if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
930 pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
931 prog->section_name, sym.st_shndx);
932 return -LIBBPF_ERRNO__RELOC;
935 insn_idx = rel.r_offset / sizeof(struct bpf_insn);
936 pr_debug("relocation: insn_idx=%u\n", insn_idx);
938 if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
939 if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
940 pr_warning("incorrect bpf_call opcode\n");
941 return -LIBBPF_ERRNO__RELOC;
943 prog->reloc_desc[i].type = RELO_CALL;
944 prog->reloc_desc[i].insn_idx = insn_idx;
945 prog->reloc_desc[i].text_off = sym.st_value;
946 obj->has_pseudo_calls = true;
/* Map relocations must target a 64-bit load-immediate instruction. */
950 if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
951 pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
952 insn_idx, insns[insn_idx].code);
953 return -LIBBPF_ERRNO__RELOC;
956 /* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
957 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
958 if (maps[map_idx].offset == sym.st_value) {
959 pr_debug("relocation: find map %zd (%s) for insn %u\n",
960 map_idx, maps[map_idx].name, insn_idx);
965 if (map_idx >= nr_maps) {
966 pr_warning("bpf relocation: map_idx %d large than %d\n",
967 (int)map_idx, (int)nr_maps - 1);
968 return -LIBBPF_ERRNO__RELOC;
971 prog->reloc_desc[i].type = RELO_LD64;
972 prog->reloc_desc[i].insn_idx = insn_idx;
973 prog->reloc_desc[i].map_idx = map_idx;
/*
 * Look up BTF type info for @map via the "____btf_map_<name>" container
 * struct emitted by BPF_ANNOTATE_KV_PAIR. The container must be a
 * struct with at least two members (key, value); their resolved sizes
 * must match the map definition's key_size/value_size. On success the
 * BTF type ids are stored in map->btf_key_type_id/btf_value_type_id.
 */
978 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
980 const struct btf_type *container_type;
981 const struct btf_member *key, *value;
982 struct bpf_map_def *def = &map->def;
983 const size_t max_name = 256;
984 char container_name[max_name];
985 __s64 key_size, value_size;
/* snprintf returning max_name means the name was truncated. */
988 if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
990 pr_warning("map:%s length of '____btf_map_%s' is too long\n",
991 map->name, map->name);
995 container_id = btf__find_by_name(btf, container_name);
996 if (container_id < 0) {
997 pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
998 map->name, container_name);
1002 container_type = btf__type_by_id(btf, container_id);
1003 if (!container_type) {
1004 pr_warning("map:%s cannot find BTF type for container_id:%u\n",
1005 map->name, container_id);
1009 if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
1010 BTF_INFO_VLEN(container_type->info) < 2) {
1011 pr_warning("map:%s container_name:%s is an invalid container struct\n",
1012 map->name, container_name);
/* Members immediately follow the btf_type header; [0]=key, [1]=value. */
1016 key = (struct btf_member *)(container_type + 1);
1019 key_size = btf__resolve_size(btf, key->type);
1021 pr_warning("map:%s invalid BTF key_type_size\n",
1026 if (def->key_size != key_size) {
1027 pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
1028 map->name, (__u32)key_size, def->key_size);
1032 value_size = btf__resolve_size(btf, value->type);
1033 if (value_size < 0) {
1034 pr_warning("map:%s invalid BTF value_type_size\n", map->name);
1038 if (def->value_size != value_size) {
1039 pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
1040 map->name, (__u32)value_size, def->value_size);
1044 map->btf_key_type_id = key->type;
1045 map->btf_value_type_id = value->type;
/*
 * Replace @map's fd with a dup of an already-created map @fd and adopt
 * that map's attributes (queried via bpf_obj_get_info_by_fd). The dup
 * dance via open("/") + dup3 reserves a new fd number with O_CLOEXEC
 * before the old map fd is closed, so the replacement is atomic from
 * the caller's perspective.
 */
1050 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1052 struct bpf_map_info info = {};
1053 __u32 len = sizeof(info);
1057 err = bpf_obj_get_info_by_fd(fd, &info, &len);
1061 new_name = strdup(info.name);
1065 new_fd = open("/", O_RDONLY | O_CLOEXEC);
1067 goto err_free_new_name;
1069 new_fd = dup3(fd, new_fd, O_CLOEXEC);
1071 goto err_close_new_fd;
1073 err = zclose(map->fd);
1075 goto err_close_new_fd;
/* Adopt the kernel's view of the reused map's definition and BTF ids. */
1079 map->name = new_name;
1080 map->def.type = info.type;
1081 map->def.key_size = info.key_size;
1082 map->def.value_size = info.value_size;
1083 map->def.max_entries = info.max_entries;
1084 map->def.map_flags = info.map_flags;
1085 map->btf_key_type_id = info.btf_key_type_id;
1086 map->btf_value_type_id = info.btf_value_type_id;
/*
 * Create a kernel map for every obj->maps entry via
 * bpf_create_map_xattr. Maps with a preset fd (e.g. from
 * bpf_map__reuse_fd) are skipped. BTF key/value ids are attached when
 * available; if the kernel rejects the BTF-annotated create, it is
 * retried once without BTF. On failure, all maps created so far are
 * closed.
 */
1098 bpf_object__create_maps(struct bpf_object *obj)
1100 struct bpf_create_map_attr create_attr = {};
1104 for (i = 0; i < obj->nr_maps; i++) {
1105 struct bpf_map *map = &obj->maps[i];
1106 struct bpf_map_def *def = &map->def;
1107 char *cp, errmsg[STRERR_BUFSIZE];
1108 int *pfd = &map->fd;
1111 pr_debug("skip map create (preset) %s: fd=%d\n",
1112 map->name, map->fd);
1116 create_attr.name = map->name;
1117 create_attr.map_ifindex = map->map_ifindex;
1118 create_attr.map_type = def->type;
1119 create_attr.map_flags = def->map_flags;
1120 create_attr.key_size = def->key_size;
1121 create_attr.value_size = def->value_size;
1122 create_attr.max_entries = def->max_entries;
1123 create_attr.btf_fd = 0;
1124 create_attr.btf_key_type_id = 0;
1125 create_attr.btf_value_type_id = 0;
1127 if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
1128 create_attr.btf_fd = btf__fd(obj->btf);
1129 create_attr.btf_key_type_id = map->btf_key_type_id;
1130 create_attr.btf_value_type_id = map->btf_value_type_id;
1133 *pfd = bpf_create_map_xattr(&create_attr);
/* Kernel may lack BTF support: retry the same create without BTF. */
1134 if (*pfd < 0 && create_attr.btf_key_type_id) {
1135 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1136 pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
1137 map->name, cp, errno);
1138 create_attr.btf_fd = 0;
1139 create_attr.btf_key_type_id = 0;
1140 create_attr.btf_value_type_id = 0;
1141 map->btf_key_type_id = 0;
1142 map->btf_value_type_id = 0;
1143 *pfd = bpf_create_map_xattr(&create_attr);
1150 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1151 pr_warning("failed to create map (name: '%s'): %s\n",
/* Unwind: close every map fd created before this failure. */
1153 for (j = 0; j < i; j++)
1154 zclose(obj->maps[j].fd);
1157 pr_debug("create map %s: fd=%d\n", map->name, *pfd);
/*
 * Resolve a RELO_CALL relocation: on first use, append the whole .text
 * program's instructions to @prog (recording the splice point in
 * main_prog_cnt), then patch the call's imm so it targets the copied
 * code relative to the call site. Relocating a call *inside* .text
 * itself is rejected.
 */
1164 bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
1165 struct reloc_desc *relo)
1167 struct bpf_insn *insn, *new_insn;
1168 struct bpf_program *text;
1171 if (relo->type != RELO_CALL)
1172 return -LIBBPF_ERRNO__RELOC;
1174 if (prog->idx == obj->efile.text_shndx) {
1175 pr_warning("relo in .text insn %d into off %d\n",
1176 relo->insn_idx, relo->text_off);
1177 return -LIBBPF_ERRNO__RELOC;
/* main_prog_cnt == 0 means .text has not been appended to this prog yet. */
1180 if (prog->main_prog_cnt == 0) {
1181 text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
1183 pr_warning("no .text section found yet relo into text exist\n");
1184 return -LIBBPF_ERRNO__RELOC;
1186 new_cnt = prog->insns_cnt + text->insns_cnt;
1187 new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
1189 pr_warning("oom in prog realloc\n");
1192 memcpy(new_insn + prog->insns_cnt, text->insns,
1193 text->insns_cnt * sizeof(*insn));
1194 prog->insns = new_insn;
1195 prog->main_prog_cnt = prog->insns_cnt;
1196 prog->insns_cnt = new_cnt;
1197 pr_debug("added %zd insn from %s to prog %s\n",
1198 text->insns_cnt, text->section_name,
1199 prog->section_name);
1201 insn = &prog->insns[relo->insn_idx];
/* Retarget the pseudo call to the appended .text copy. */
1202 insn->imm += prog->main_prog_cnt - relo->insn_idx;
/*
 * Apply all collected relocations to @prog: RELO_LD64 entries patch the
 * load-immediate to BPF_PSEUDO_MAP_FD with the created map's fd;
 * RELO_CALL entries are handled by bpf_program__reloc_text(). The
 * descriptor array is freed once all relocations are applied.
 */
1207 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
1211 if (!prog || !prog->reloc_desc)
1214 for (i = 0; i < prog->nr_reloc; i++) {
1215 if (prog->reloc_desc[i].type == RELO_LD64) {
1216 struct bpf_insn *insns = prog->insns;
1217 int insn_idx, map_idx;
1219 insn_idx = prog->reloc_desc[i].insn_idx;
1220 map_idx = prog->reloc_desc[i].map_idx;
1222 if (insn_idx >= (int)prog->insns_cnt) {
1223 pr_warning("relocation out of range: '%s'\n",
1224 prog->section_name);
1225 return -LIBBPF_ERRNO__RELOC;
1227 insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
1228 insns[insn_idx].imm = obj->maps[map_idx].fd;
1230 err = bpf_program__reloc_text(prog, obj,
1231 &prog->reloc_desc[i]);
1237 zfree(&prog->reloc_desc);
/*
 * Relocate every program in the object; stops at the first failure.
 */
1244 bpf_object__relocate(struct bpf_object *obj)
1246 struct bpf_program *prog;
1250 for (i = 0; i < obj->nr_programs; i++) {
1251 prog = &obj->programs[i];
1253 err = bpf_program__relocate(prog, obj);
1255 pr_warning("failed to relocate '%s'\n",
1256 prog->section_name);
/*
 * For each SHT_REL section queued by bpf_object__elf_collect(), find
 * the program it applies to (via sh_info) and decode its entries with
 * bpf_program__collect_reloc(). Requires the ELF handle to still be
 * open.
 */
1263 static int bpf_object__collect_reloc(struct bpf_object *obj)
1267 if (!obj_elf_valid(obj)) {
1268 pr_warning("Internal error: elf object is closed\n");
1269 return -LIBBPF_ERRNO__INTERNAL;
1272 for (i = 0; i < obj->efile.nr_reloc; i++) {
1273 GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
1274 Elf_Data *data = obj->efile.reloc[i].data;
/* sh_info of a REL section is the index of the section it patches. */
1275 int idx = shdr->sh_info;
1276 struct bpf_program *prog;
1278 if (shdr->sh_type != SHT_REL) {
1279 pr_warning("internal error at %d\n", __LINE__);
1280 return -LIBBPF_ERRNO__INTERNAL;
1283 prog = bpf_object__find_prog_by_idx(obj, idx);
1285 pr_warning("relocation failed: no section(%d)\n", idx);
1286 return -LIBBPF_ERRNO__RELOC;
1289 err = bpf_program__collect_reloc(prog,
/*
 * Load one program instance into the kernel via
 * bpf_load_program_xattr, storing the resulting fd in *pfd. A
 * BPF_LOG_BUF_SIZE verifier log buffer is allocated best-effort. On
 * failure the error is classified: verifier rejection
 * (LIBBPF_ERRNO__VERIFY, with the log dumped), program too large
 * (PROG2BIG), wrong program type (PROGTYPE — probed by retrying as
 * KPROBE), or kernel-version mismatch (KVER).
 */
1299 load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
1300 const char *name, struct bpf_insn *insns, int insns_cnt,
1301 char *license, __u32 kern_version, int *pfd, int prog_ifindex)
1303 struct bpf_load_program_attr load_attr;
1304 char *cp, errmsg[STRERR_BUFSIZE];
1308 memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
1309 load_attr.prog_type = type;
1310 load_attr.expected_attach_type = expected_attach_type;
1311 load_attr.name = name;
1312 load_attr.insns = insns;
1313 load_attr.insns_cnt = insns_cnt;
1314 load_attr.license = license;
1315 load_attr.kern_version = kern_version;
1316 load_attr.prog_ifindex = prog_ifindex;
1318 if (!load_attr.insns || !load_attr.insns_cnt)
/* Log allocation failure is non-fatal: load proceeds without a log. */
1321 log_buf = malloc(BPF_LOG_BUF_SIZE);
1323 pr_warning("Alloc log buffer for bpf loader error, continue without log\n");
1325 ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);
1333 ret = -LIBBPF_ERRNO__LOAD;
1334 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1335 pr_warning("load bpf program failed: %s\n", cp);
1337 if (log_buf && log_buf[0] != '\0') {
1338 ret = -LIBBPF_ERRNO__VERIFY;
1339 pr_warning("-- BEGIN DUMP LOG ---\n");
1340 pr_warning("\n%s\n", log_buf);
1341 pr_warning("-- END LOG --\n");
1342 } else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
1343 pr_warning("Program too large (%zu insns), at most %d insns\n",
1344 load_attr.insns_cnt, BPF_MAXINSNS);
1345 ret = -LIBBPF_ERRNO__PROG2BIG;
1347 /* Wrong program type? */
1348 if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
/* Probe: if it loads as KPROBE, the original type was wrong. */
1351 load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
1352 load_attr.expected_attach_type = 0;
1353 fd = bpf_load_program_xattr(&load_attr, NULL, 0);
1356 ret = -LIBBPF_ERRNO__PROGTYPE;
1362 ret = -LIBBPF_ERRNO__KVER;
/*
 * Load @prog into the kernel. Without a preprocessor there is exactly
 * one instance, loaded directly. With a preprocessor, each instance's
 * instructions are produced by the preprocessor callback and loaded
 * separately (a NULL/empty result skips that instance, leaving fd -1).
 * On success the instruction buffer is freed — it is no longer needed
 * once the program fds exist.
 */
1371 bpf_program__load(struct bpf_program *prog,
1372 char *license, __u32 kern_version)
/* Lazily set up the single-instance fd array for the no-preprocessor case. */
1376 if (prog->instances.nr < 0 || !prog->instances.fds) {
1377 if (prog->preprocessor) {
1378 pr_warning("Internal error: can't load program '%s'\n",
1379 prog->section_name);
1380 return -LIBBPF_ERRNO__INTERNAL;
1383 prog->instances.fds = malloc(sizeof(int));
1384 if (!prog->instances.fds) {
1385 pr_warning("Not enough memory for BPF fds\n");
1388 prog->instances.nr = 1;
1389 prog->instances.fds[0] = -1;
1392 if (!prog->preprocessor) {
1393 if (prog->instances.nr != 1) {
1394 pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
1395 prog->section_name, prog->instances.nr);
1397 err = load_program(prog->type, prog->expected_attach_type,
1398 prog->name, prog->insns, prog->insns_cnt,
1399 license, kern_version, &fd,
1400 prog->prog_ifindex);
1402 prog->instances.fds[0] = fd;
1406 for (i = 0; i < prog->instances.nr; i++) {
1407 struct bpf_prog_prep_result result;
1408 bpf_program_prep_t preprocessor = prog->preprocessor;
1410 bzero(&result, sizeof(result));
1411 err = preprocessor(prog, i, prog->insns,
1412 prog->insns_cnt, &result);
1414 pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
1415 i, prog->section_name);
/* Preprocessor may return no instructions to skip this instance. */
1419 if (!result.new_insn_ptr || !result.new_insn_cnt) {
1420 pr_debug("Skip loading the %dth instance of program '%s'\n",
1421 i, prog->section_name);
1422 prog->instances.fds[i] = -1;
1428 err = load_program(prog->type, prog->expected_attach_type,
1429 prog->name, result.new_insn_ptr,
1430 result.new_insn_cnt,
1431 license, kern_version, &fd,
1432 prog->prog_ifindex);
1435 pr_warning("Loading the %dth instance of program '%s' failed\n",
1436 i, prog->section_name);
1442 prog->instances.fds[i] = fd;
1446 pr_warning("failed to load program '%s'\n",
1447 prog->section_name);
1448 zfree(&prog->insns);
1449 prog->insns_cnt = 0;
/*
 * .text acts as shared function storage (not a standalone program)
 * when the object contains pseudo calls into it — such a "program"
 * must not be loaded by itself.
 */
1453 static bool bpf_program__is_function_storage(struct bpf_program *prog,
1454 struct bpf_object *obj)
1456 return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
/*
 * Load every program in the object, skipping function-storage .text.
 */
1460 bpf_object__load_progs(struct bpf_object *obj)
1465 for (i = 0; i < obj->nr_programs; i++) {
1466 if (bpf_program__is_function_storage(&obj->programs[i], obj))
1468 err = bpf_program__load(&obj->programs[i],
/*
 * Whether a program type requires a "version" section (kernel version
 * check at load time). Tracing types (kprobe/tracepoint/perf_event/
 * raw_tracepoint) and UNSPEC need it; networking/cgroup types do not.
 */
1477 static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
1480 case BPF_PROG_TYPE_SOCKET_FILTER:
1481 case BPF_PROG_TYPE_SCHED_CLS:
1482 case BPF_PROG_TYPE_SCHED_ACT:
1483 case BPF_PROG_TYPE_XDP:
1484 case BPF_PROG_TYPE_CGROUP_SKB:
1485 case BPF_PROG_TYPE_CGROUP_SOCK:
1486 case BPF_PROG_TYPE_LWT_IN:
1487 case BPF_PROG_TYPE_LWT_OUT:
1488 case BPF_PROG_TYPE_LWT_XMIT:
1489 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
1490 case BPF_PROG_TYPE_SOCK_OPS:
1491 case BPF_PROG_TYPE_SK_SKB:
1492 case BPF_PROG_TYPE_CGROUP_DEVICE:
1493 case BPF_PROG_TYPE_SK_MSG:
1494 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
1495 case BPF_PROG_TYPE_LIRC_MODE2:
1496 case BPF_PROG_TYPE_SK_REUSEPORT:
1497 case BPF_PROG_TYPE_FLOW_DISSECTOR:
1499 case BPF_PROG_TYPE_UNSPEC:
1500 case BPF_PROG_TYPE_KPROBE:
1501 case BPF_PROG_TYPE_TRACEPOINT:
1502 case BPF_PROG_TYPE_PERF_EVENT:
1503 case BPF_PROG_TYPE_RAW_TRACEPOINT:
/*
 * Reject objects that need a kernel version but carry none.
 */
1509 static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1511 if (needs_kver && obj->kern_version == 0) {
1512 pr_warning("%s doesn't provide kernel version\n",
1514 return -LIBBPF_ERRNO__KVERSION;
/*
 * Core open path shared by all bpf_object__open* entry points: create
 * the object, parse the ELF (init, endianness, section collection,
 * relocations, validation) and release ELF state on success. Returns
 * an ERR_PTR on failure after closing the partially built object.
 */
1519 static struct bpf_object *
1520 __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
1521 bool needs_kver, int flags)
1523 struct bpf_object *obj;
1526 if (elf_version(EV_CURRENT) == EV_NONE) {
1527 pr_warning("failed to init libelf for %s\n", path);
1528 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
1531 obj = bpf_object__new(path, obj_buf, obj_buf_sz);
/* CHECK_ERR jumps to out on any negative return, capturing err. */
1535 CHECK_ERR(bpf_object__elf_init(obj), err, out);
1536 CHECK_ERR(bpf_object__check_endianness(obj), err, out);
1537 CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
1538 CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
1539 CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);
1541 bpf_object__elf_finish(obj);
1544 bpf_object__close(obj);
1545 return ERR_PTR(err);
1548 struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
1551 /* param validation */
1555 pr_debug("loading %s\n", attr->file);
1557 return __bpf_object__open(attr->file, NULL, 0,
1558 bpf_prog_type__needs_kver(attr->prog_type),
1562 struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
1564 return __bpf_object__open_xattr(attr, 0);
1567 struct bpf_object *bpf_object__open(const char *path)
1569 struct bpf_object_open_attr attr = {
1571 .prog_type = BPF_PROG_TYPE_UNSPEC,
1574 return bpf_object__open_xattr(&attr);
/* Open an object from an in-memory ELF image. When @name is NULL a
 * synthetic name is derived from the buffer address and size so the
 * object can still be identified in diagnostics.
 */
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		/* defensive: snprintf already terminates, keep belt-and-braces */
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
}
1600 int bpf_object__unload(struct bpf_object *obj)
1607 for (i = 0; i < obj->nr_maps; i++)
1608 zclose(obj->maps[i].fd);
1610 for (i = 0; i < obj->nr_programs; i++)
1611 bpf_program__unload(&obj->programs[i]);
1616 int bpf_object__load(struct bpf_object *obj)
1624 pr_warning("object should not be loaded twice\n");
1630 CHECK_ERR(bpf_object__create_maps(obj), err, out);
1631 CHECK_ERR(bpf_object__relocate(obj), err, out);
1632 CHECK_ERR(bpf_object__load_progs(obj), err, out);
1636 bpf_object__unload(obj);
1637 pr_warning("failed to load object '%s'\n", obj->path);
1641 static int check_path(const char *path)
1643 char *cp, errmsg[STRERR_BUFSIZE];
1644 struct statfs st_fs;
1651 dname = strdup(path);
1655 dir = dirname(dname);
1656 if (statfs(dir, &st_fs)) {
1657 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1658 pr_warning("failed to statfs %s: %s\n", dir, cp);
1663 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1664 pr_warning("specified path %s is not on BPF FS\n", path);
1671 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1674 char *cp, errmsg[STRERR_BUFSIZE];
1677 err = check_path(path);
1682 pr_warning("invalid program pointer\n");
1686 if (instance < 0 || instance >= prog->instances.nr) {
1687 pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1688 instance, prog->section_name, prog->instances.nr);
1692 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
1693 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1694 pr_warning("failed to pin program: %s\n", cp);
1697 pr_debug("pinned program '%s'\n", path);
1702 static int make_dir(const char *path)
1704 char *cp, errmsg[STRERR_BUFSIZE];
1707 if (mkdir(path, 0700) && errno != EEXIST)
1711 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
1712 pr_warning("failed to mkdir %s: %s\n", path, cp);
1717 int bpf_program__pin(struct bpf_program *prog, const char *path)
1721 err = check_path(path);
1726 pr_warning("invalid program pointer\n");
1730 if (prog->instances.nr <= 0) {
1731 pr_warning("no instances of prog %s to pin\n",
1732 prog->section_name);
1736 err = make_dir(path);
1740 for (i = 0; i < prog->instances.nr; i++) {
1744 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
1747 else if (len >= PATH_MAX)
1748 return -ENAMETOOLONG;
1750 err = bpf_program__pin_instance(prog, buf, i);
1758 int bpf_map__pin(struct bpf_map *map, const char *path)
1760 char *cp, errmsg[STRERR_BUFSIZE];
1763 err = check_path(path);
1768 pr_warning("invalid map pointer\n");
1772 if (bpf_obj_pin(map->fd, path)) {
1773 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1774 pr_warning("failed to pin map: %s\n", cp);
1778 pr_debug("pinned map '%s'\n", path);
1782 int bpf_object__pin(struct bpf_object *obj, const char *path)
1784 struct bpf_program *prog;
1785 struct bpf_map *map;
1792 pr_warning("object not yet loaded; load it first\n");
1796 err = make_dir(path);
1800 bpf_map__for_each(map, obj) {
1804 len = snprintf(buf, PATH_MAX, "%s/%s", path,
1805 bpf_map__name(map));
1808 else if (len >= PATH_MAX)
1809 return -ENAMETOOLONG;
1811 err = bpf_map__pin(map, buf);
1816 bpf_object__for_each_program(prog, obj) {
1820 len = snprintf(buf, PATH_MAX, "%s/%s", path,
1821 prog->section_name);
1824 else if (len >= PATH_MAX)
1825 return -ENAMETOOLONG;
1827 err = bpf_program__pin(prog, buf);
1835 void bpf_object__close(struct bpf_object *obj)
1842 if (obj->clear_priv)
1843 obj->clear_priv(obj, obj->priv);
1845 bpf_object__elf_finish(obj);
1846 bpf_object__unload(obj);
1847 btf__free(obj->btf);
1849 for (i = 0; i < obj->nr_maps; i++) {
1850 zfree(&obj->maps[i].name);
1851 if (obj->maps[i].clear_priv)
1852 obj->maps[i].clear_priv(&obj->maps[i],
1854 obj->maps[i].priv = NULL;
1855 obj->maps[i].clear_priv = NULL;
1860 if (obj->programs && obj->nr_programs) {
1861 for (i = 0; i < obj->nr_programs; i++)
1862 bpf_program__exit(&obj->programs[i]);
1864 zfree(&obj->programs);
1866 list_del(&obj->list);
1871 bpf_object__next(struct bpf_object *prev)
1873 struct bpf_object *next;
1876 next = list_first_entry(&bpf_objects_list,
1880 next = list_next_entry(prev, list);
1882 /* Empty list is noticed here so don't need checking on entry. */
1883 if (&next->list == &bpf_objects_list)
1889 const char *bpf_object__name(struct bpf_object *obj)
1891 return obj ? obj->path : ERR_PTR(-EINVAL);
1894 unsigned int bpf_object__kversion(struct bpf_object *obj)
1896 return obj ? obj->kern_version : 0;
1899 int bpf_object__btf_fd(const struct bpf_object *obj)
1901 return obj->btf ? btf__fd(obj->btf) : -1;
1904 int bpf_object__set_priv(struct bpf_object *obj, void *priv,
1905 bpf_object_clear_priv_t clear_priv)
1907 if (obj->priv && obj->clear_priv)
1908 obj->clear_priv(obj, obj->priv);
1911 obj->clear_priv = clear_priv;
1915 void *bpf_object__priv(struct bpf_object *obj)
1917 return obj ? obj->priv : ERR_PTR(-EINVAL);
1920 static struct bpf_program *
1921 __bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
1929 return &obj->programs[0];
1931 if (prev->obj != obj) {
1932 pr_warning("error: program handler doesn't match object\n");
1936 idx = (prev - obj->programs) + 1;
1937 if (idx >= obj->nr_programs)
1939 return &obj->programs[idx];
/* Public program iterator: like __bpf_program__next() but skips entries
 * that only carry sub-program (function storage) code.
 */
struct bpf_program *
bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
	struct bpf_program *prog;

	for (prog = __bpf_program__next(prev, obj);
	     prog && bpf_program__is_function_storage(prog, obj);
	     prog = __bpf_program__next(prog, obj))
		;	/* keep advancing past function-storage entries */

	return prog;
}
1954 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
1955 bpf_program_clear_priv_t clear_priv)
1957 if (prog->priv && prog->clear_priv)
1958 prog->clear_priv(prog, prog->priv);
1961 prog->clear_priv = clear_priv;
1965 void *bpf_program__priv(struct bpf_program *prog)
1967 return prog ? prog->priv : ERR_PTR(-EINVAL);
1970 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
1972 prog->prog_ifindex = ifindex;
1975 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
1979 title = prog->section_name;
1981 title = strdup(title);
1983 pr_warning("failed to strdup program title\n");
1984 return ERR_PTR(-ENOMEM);
/* Fd of the first (index 0) instance of @prog. */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
1996 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
1997 bpf_program_prep_t prep)
2001 if (nr_instances <= 0 || !prep)
2004 if (prog->instances.nr > 0 || prog->instances.fds) {
2005 pr_warning("Can't set pre-processor after loading\n");
2009 instances_fds = malloc(sizeof(int) * nr_instances);
2010 if (!instances_fds) {
2011 pr_warning("alloc memory failed for fds\n");
2015 /* fill all fd with -1 */
2016 memset(instances_fds, -1, sizeof(int) * nr_instances);
2018 prog->instances.nr = nr_instances;
2019 prog->instances.fds = instances_fds;
2020 prog->preprocessor = prep;
2024 int bpf_program__nth_fd(struct bpf_program *prog, int n)
2031 if (n >= prog->instances.nr || n < 0) {
2032 pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
2033 n, prog->section_name, prog->instances.nr);
2037 fd = prog->instances.fds[n];
2039 pr_warning("%dth instance of program '%s' is invalid\n",
2040 n, prog->section_name);
2047 void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
2052 static bool bpf_program__is_type(struct bpf_program *prog,
2053 enum bpf_prog_type type)
2055 return prog ? (prog->type == type) : false;
2058 #define BPF_PROG_TYPE_FNS(NAME, TYPE) \
2059 int bpf_program__set_##NAME(struct bpf_program *prog) \
2063 bpf_program__set_type(prog, TYPE); \
2067 bool bpf_program__is_##NAME(struct bpf_program *prog) \
2069 return bpf_program__is_type(prog, TYPE); \
2072 BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
2073 BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
2074 BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
2075 BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
2076 BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
2077 BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
2078 BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
2079 BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
2081 void bpf_program__set_expected_attach_type(struct bpf_program *prog,
2082 enum bpf_attach_type type)
2084 prog->expected_attach_type = type;
2087 #define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
2088 { string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }
2090 /* Programs that can NOT be attached. */
2091 #define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)
2093 /* Programs that can be attached. */
2094 #define BPF_APROG_SEC(string, ptype, atype) \
2095 BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)
2097 /* Programs that must specify expected attach type at load time. */
2098 #define BPF_EAPROG_SEC(string, ptype, eatype) \
2099 BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)
2101 /* Programs that can be attached but attach type can't be identified by section
2102 * name. Kept for backward compatibility.
2104 #define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
2106 static const struct {
2109 enum bpf_prog_type prog_type;
2110 enum bpf_attach_type expected_attach_type;
2112 enum bpf_attach_type attach_type;
2113 } section_names[] = {
2114 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
2115 BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
2116 BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
2117 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
2118 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
2119 BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
2120 BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT),
2121 BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
2122 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
2123 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
2124 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
2125 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
2126 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
2127 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
2128 BPF_CGROUP_INET_INGRESS),
2129 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
2130 BPF_CGROUP_INET_EGRESS),
2131 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
2132 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
2133 BPF_CGROUP_INET_SOCK_CREATE),
2134 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
2135 BPF_CGROUP_INET4_POST_BIND),
2136 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
2137 BPF_CGROUP_INET6_POST_BIND),
2138 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
2140 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
2141 BPF_CGROUP_SOCK_OPS),
2142 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
2143 BPF_SK_SKB_STREAM_PARSER),
2144 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
2145 BPF_SK_SKB_STREAM_VERDICT),
2146 BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
2147 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
2148 BPF_SK_MSG_VERDICT),
2149 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
2151 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
2152 BPF_FLOW_DISSECTOR),
2153 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2154 BPF_CGROUP_INET4_BIND),
2155 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2156 BPF_CGROUP_INET6_BIND),
2157 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2158 BPF_CGROUP_INET4_CONNECT),
2159 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2160 BPF_CGROUP_INET6_CONNECT),
2161 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2162 BPF_CGROUP_UDP4_SENDMSG),
2163 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2164 BPF_CGROUP_UDP6_SENDMSG),
2167 #undef BPF_PROG_SEC_IMPL
2169 #undef BPF_APROG_SEC
2170 #undef BPF_EAPROG_SEC
2171 #undef BPF_APROG_COMPAT
2173 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
2174 enum bpf_attach_type *expected_attach_type)
2181 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2182 if (strncmp(name, section_names[i].sec, section_names[i].len))
2184 *prog_type = section_names[i].prog_type;
2185 *expected_attach_type = section_names[i].expected_attach_type;
2191 int libbpf_attach_type_by_name(const char *name,
2192 enum bpf_attach_type *attach_type)
2199 for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2200 if (strncmp(name, section_names[i].sec, section_names[i].len))
2202 if (!section_names[i].is_attachable)
2204 *attach_type = section_names[i].attach_type;
2211 bpf_program__identify_section(struct bpf_program *prog,
2212 enum bpf_prog_type *prog_type,
2213 enum bpf_attach_type *expected_attach_type)
2215 return libbpf_prog_type_by_name(prog->section_name, prog_type,
2216 expected_attach_type);
2219 int bpf_map__fd(struct bpf_map *map)
2221 return map ? map->fd : -EINVAL;
2224 const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
2226 return map ? &map->def : ERR_PTR(-EINVAL);
2229 const char *bpf_map__name(struct bpf_map *map)
2231 return map ? map->name : NULL;
2234 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
2236 return map ? map->btf_key_type_id : 0;
2239 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
2241 return map ? map->btf_value_type_id : 0;
2244 int bpf_map__set_priv(struct bpf_map *map, void *priv,
2245 bpf_map_clear_priv_t clear_priv)
2251 if (map->clear_priv)
2252 map->clear_priv(map, map->priv);
2256 map->clear_priv = clear_priv;
2260 void *bpf_map__priv(struct bpf_map *map)
2262 return map ? map->priv : ERR_PTR(-EINVAL);
2265 bool bpf_map__is_offload_neutral(struct bpf_map *map)
2267 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
2270 void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
2272 map->map_ifindex = ifindex;
2276 bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2279 struct bpf_map *s, *e;
2281 if (!obj || !obj->maps)
2285 e = obj->maps + obj->nr_maps;
2290 if ((prev < s) || (prev >= e)) {
2291 pr_warning("error in %s: map handler doesn't belong to object\n",
2296 idx = (prev - obj->maps) + 1;
2297 if (idx >= obj->nr_maps)
2299 return &obj->maps[idx];
2303 bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
2305 struct bpf_map *pos;
2307 bpf_map__for_each(pos, obj) {
2308 if (pos->name && !strcmp(pos->name, name))
2315 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2319 for (i = 0; i < obj->nr_maps; i++) {
2320 if (obj->maps[i].offset == offset)
2321 return &obj->maps[i];
2323 return ERR_PTR(-ENOENT);
/* Decode an ERR_PTR-style return: the negative error for an error-encoded
 * pointer, 0 for a valid one.
 */
long libbpf_get_error(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
2333 int bpf_prog_load(const char *file, enum bpf_prog_type type,
2334 struct bpf_object **pobj, int *prog_fd)
2336 struct bpf_prog_load_attr attr;
2338 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2340 attr.prog_type = type;
2341 attr.expected_attach_type = 0;
2343 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2346 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
2347 struct bpf_object **pobj, int *prog_fd)
2349 struct bpf_object_open_attr open_attr = {
2351 .prog_type = attr->prog_type,
2353 struct bpf_program *prog, *first_prog = NULL;
2354 enum bpf_attach_type expected_attach_type;
2355 enum bpf_prog_type prog_type;
2356 struct bpf_object *obj;
2357 struct bpf_map *map;
2365 obj = bpf_object__open_xattr(&open_attr);
2366 if (IS_ERR_OR_NULL(obj))
2369 bpf_object__for_each_program(prog, obj) {
2371 * If type is not specified, try to guess it based on
2374 prog_type = attr->prog_type;
2375 prog->prog_ifindex = attr->ifindex;
2376 expected_attach_type = attr->expected_attach_type;
2377 if (prog_type == BPF_PROG_TYPE_UNSPEC) {
2378 err = bpf_program__identify_section(prog, &prog_type,
2379 &expected_attach_type);
2381 pr_warning("failed to guess program type based on section name %s\n",
2382 prog->section_name);
2383 bpf_object__close(obj);
2388 bpf_program__set_type(prog, prog_type);
2389 bpf_program__set_expected_attach_type(prog,
2390 expected_attach_type);
2396 bpf_map__for_each(map, obj) {
2397 if (!bpf_map__is_offload_neutral(map))
2398 map->map_ifindex = attr->ifindex;
2402 pr_warning("object file doesn't contain bpf program\n");
2403 bpf_object__close(obj);
2407 err = bpf_object__load(obj);
2409 bpf_object__close(obj);
2414 *prog_fd = bpf_program__fd(first_prog);
2418 enum bpf_perf_event_ret
2419 bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
2420 void **copy_mem, size_t *copy_size,
2421 bpf_perf_event_print_t fn, void *private_data)
2423 struct perf_event_mmap_page *header = mmap_mem;
2424 __u64 data_head = ring_buffer_read_head(header);
2425 __u64 data_tail = header->data_tail;
2426 void *base = ((__u8 *)header) + page_size;
2427 int ret = LIBBPF_PERF_EVENT_CONT;
2428 struct perf_event_header *ehdr;
2431 while (data_head != data_tail) {
2432 ehdr = base + (data_tail & (mmap_size - 1));
2433 ehdr_size = ehdr->size;
2435 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
2436 void *copy_start = ehdr;
2437 size_t len_first = base + mmap_size - copy_start;
2438 size_t len_secnd = ehdr_size - len_first;
2440 if (*copy_size < ehdr_size) {
2442 *copy_mem = malloc(ehdr_size);
2445 ret = LIBBPF_PERF_EVENT_ERROR;
2448 *copy_size = ehdr_size;
2451 memcpy(*copy_mem, copy_start, len_first);
2452 memcpy(*copy_mem + len_first, base, len_secnd);
2456 ret = fn(ehdr, private_data);
2457 data_tail += ehdr_size;
2458 if (ret != LIBBPF_PERF_EVENT_CONT)
2462 ring_buffer_write_tail(header, data_tail);