// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Facebook */
#include <linux/err.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_internal.h>
#include <sys/types.h>
#include "json_writer.h"

#define MAX_OBJ_NAME_LEN 64
static void sanitize_identifier(char *name)
        for (i = 0; name[i]; i++)
                if (!isalnum(name[i]) && name[i] != '_')
static bool str_has_prefix(const char *str, const char *prefix)
        return strncmp(str, prefix, strlen(prefix)) == 0;
static bool str_has_suffix(const char *str, const char *suffix)
        size_t i, n1 = strlen(str), n2 = strlen(suffix);

        for (i = 0; i < n2; i++) {
                if (str[n1 - i - 1] != suffix[n2 - i - 1])
static void get_obj_name(char *name, const char *file)
        /* Uses the GNU version of basename(), which doesn't modify its argument. */
        strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1);
        name[MAX_OBJ_NAME_LEN - 1] = '\0';
        if (str_has_suffix(name, ".o"))
                name[strlen(name) - 2] = '\0';
        sanitize_identifier(name);
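/* Illustrative example (hypothetical file name, not from the original source):
 * for "samples/pid_iter.bpf.o" the basename is "pid_iter.bpf.o", the trailing
 * ".o" is chopped off, and sanitize_identifier() turns the remaining dot into
 * an underscore, yielding the object name "pid_iter_bpf".
 */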
static void get_header_guard(char *guard, const char *obj_name, const char *suffix)
        sprintf(guard, "__%s_%s__", obj_name, suffix);
        for (i = 0; guard[i]; i++)
                guard[i] = toupper(guard[i]);
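/* Illustrative example: obj_name "pid_iter_bpf" with suffix "SKEL_H" produces
 * the header guard "__PID_ITER_BPF_SKEL_H__".
 */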
static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
        static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
        const char *name = bpf_map__name(map);

        if (!bpf_map__is_internal(map)) {
                snprintf(buf, buf_sz, "%s", name);

        for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
                const char *sfx = sfxs[i], *p;

                p = strstr(name, sfx);
                        snprintf(buf, buf_sz, "%s", p + 1);
                        sanitize_identifier(buf);
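/* Illustrative example: an internal map backing the ".rodata" section of an
 * object "foo" is named "foo.rodata" (possibly truncated); strstr() finds the
 * ".rodata" suffix and the resulting ident is "rodata".
 */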
static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
        static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };

        for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
                const char *pfx = pfxs[i];

                if (str_has_prefix(sec_name, pfx)) {
                        snprintf(buf, buf_sz, "%s", sec_name + 1);
                        sanitize_identifier(buf);
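/* Illustrative example: the section name ".rodata.str1.1" matches the
 * ".rodata" prefix, the leading dot is skipped, and sanitization yields
 * "rodata_str1_1".
 */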
static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
static int codegen_datasec_def(struct bpf_object *obj,
                               const struct btf_type *sec,
                               const char *obj_name)
        const char *sec_name = btf__name_by_offset(btf, sec->name_off);
        const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
        int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
        char var_ident[256], sec_ident[256];
        bool strip_mods = false;

        if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
        if (strcmp(sec_name, ".kconfig") != 0)

        printf("\tstruct %s__%s {\n", obj_name, sec_ident);
        for (i = 0; i < vlen; i++, sec_var++) {
                const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
                const char *var_name = btf__name_by_offset(btf, var->name_off);
                DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
                        .field_name = var_ident,
                        .strip_mods = strip_mods,
                int need_off = sec_var->offset, align_off, align;
                __u32 var_type_id = var->type;

                /* static variables are not exposed through BPF skeleton */
                if (btf_var(var)->linkage == BTF_VAR_STATIC)

                if (off > need_off) {
159 p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
160 sec_name, i, need_off, off);
                align = btf__align_of(btf, var->type);
                        p_err("Failed to determine alignment of variable '%s': %d",
                /* Assume 32-bit architectures when generating data section
                 * struct memory layout. Given bpftool can't know which target
                 * host architecture it's emitting the skeleton for, we need
                 * to be conservative and assume a 32-bit one to ensure enough
                 * padding bytes are generated for pointer and long types.
                 * This will still work correctly for 64-bit architectures,
                 * because in the worst case we'll generate an unnecessary
                 * padding field, which on 64-bit architectures is not
                 * strictly necessary and would be handled by natural 8-byte
                 * alignment. But it still will be a correct memory layout,
                 * based on recorded offsets.
                 */
                align_off = (off + align - 1) / align * align;
                if (align_off != need_off) {
                        printf("\t\tchar __pad%d[%d];\n",
                               pad_cnt, need_off - off);
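                /* Worked example (hypothetical values, not from the original
                 * source): if the previous variable ended at off = 4 and the
                 * next variable is recorded at need_off = 8 with align = 4,
                 * then align_off = 4 != 8, so a "char __pad0[4];" member is
                 * emitted and the next field lands at offset 8.
                 */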
                /* sanitize variable name, e.g., for static vars inside
                 * a function, its name is '<function name>.<variable name>',
                 * which we'll turn into '<function name>_<variable name>'
                 */
                strncat(var_ident, var_name, sizeof(var_ident) - 1);
                sanitize_identifier(var_ident);
                err = btf_dump__emit_type_decl(d, var_type_id, &opts);

                off = sec_var->offset + sec_var->size;
        printf("\t} *%s;\n", sec_ident);
static const struct btf_type *find_type_for_map(struct btf *btf, const char *map_ident)
        int n = btf__type_cnt(btf), i;

        for (i = 1; i < n; i++) {
                const struct btf_type *t = btf__type_by_id(btf, i);

                if (!btf_is_datasec(t))

                name = btf__str_by_offset(btf, t->name_off);
                if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))

                if (strcmp(sec_ident, map_ident) == 0)
static bool is_internal_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
        if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))

        if (!get_map_ident(map, buf, sz))
static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
        struct btf *btf = bpf_object__btf(obj);
        const struct btf_type *sec;

        d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);

        bpf_object__for_each_map(map, obj) {
                /* only generate definitions for memory-mapped internal maps */
                if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))

                sec = find_type_for_map(btf, map_ident);
                /* In some cases (e.g., sections like .rodata.cst16 containing
                 * compiler allocated string constants only) there will be
                 * special internal maps with no corresponding DATASEC BTF
                 * type. In such cases, generate an empty struct for each such
                 * map. It will still be memory-mapped and its contents
                 * accessible from user-space through the BPF skeleton.
                 */
                printf("\tstruct %s__%s {\n", obj_name, map_ident);
                printf("\t} *%s;\n", map_ident);
                err = codegen_datasec_def(obj, btf, d, sec, obj_name);
static bool btf_is_ptr_to_func_proto(const struct btf *btf,
                                     const struct btf_type *v)
        return btf_is_ptr(v) && btf_is_func_proto(btf__type_by_id(btf, v->type));
static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name)
        struct btf *btf = bpf_object__btf(obj);
        const struct btf_type *sec, *var;
        const struct btf_var_secinfo *sec_var;
        int i, err = 0, vlen;
        char map_ident[256], sec_ident[256];
        bool strip_mods = false, needs_typeof = false;
        const char *sec_name, *var_name;

        d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);

        bpf_object__for_each_map(map, obj) {
                /* only generate definitions for memory-mapped internal maps */
                if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))

                sec = find_type_for_map(btf, map_ident);

                sec_name = btf__name_by_offset(btf, sec->name_off);
                if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))

                strip_mods = strcmp(sec_name, ".kconfig") != 0;
325 printf(" struct %s__%s {\n", obj_name, sec_ident);
327 sec_var = btf_var_secinfos(sec);
328 vlen = btf_vlen(sec);
329 for (i = 0; i < vlen; i++, sec_var++) {
330 DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
332 .strip_mods = strip_mods,
333 /* we'll print the name separately */
337 var = btf__type_by_id(btf, sec_var->type);
338 var_name = btf__name_by_offset(btf, var->name_off);
339 var_type_id = var->type;
                        /* static variables are not exposed through BPF skeleton */
                        if (btf_var(var)->linkage == BTF_VAR_STATIC)

                        /* The datasec member has KIND_VAR but we want the
                         * underlying type of the variable (e.g. KIND_INT).
                         */
                        var = skip_mods_and_typedefs(btf, var->type, NULL);
                        /* Func and array members require special handling.
                         * Instead of producing `typename *var`, they produce
                         * `typeof(typename) *var`. This allows us to keep a
                         * similar syntax where the identifier is just prefixed
                         * by *, allowing us to ignore C declaration minutiae.
                         */
                        needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var);
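                        /* Illustrative example: for a variable "int arr[4]"
                         * the emitted subskeleton member is
                         * "typeof(int[4]) *arr;" rather than an invalid
                         * "int[4] *arr;" declaration.
                         */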
                        err = btf_dump__emit_type_decl(d, var_type_id, &opts);

                        printf(" *%s;\n", var_name);
                printf("\t} %s;\n", sec_ident);
static void codegen(const char *template, ...)
        const char *src, *end;
        int skip_tabs = 0, n;

        n = strlen(template);

        /* find out "baseline" indentation to skip */
        while ((c = *src++)) {
                } else if (c == '\n') {
                        p_err("unrecognized character at pos %td in template '%s': '%c'",
                              src - template - 1, template, c);

                /* skip baseline indentation tabs */
                for (n = skip_tabs; n > 0; n--, src++) {
                                p_err("not enough tabs at pos %td in template '%s'",
                                      src - template - 1, template);

                /* trim trailing whitespace */
                end = strchrnul(src, '\n');
                for (n = end - src; n > 0 && isspace(src[n - 1]); n--)

                src = *end ? end + 1 : end;

        /* print out using adjusted template */
        va_start(args, template);
        n = vprintf(s, args);
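/* Illustrative usage (modeled on how this file calls codegen()): the template
 * lines share a baseline indentation of tabs, which codegen() strips, and
 * trailing whitespace before each "\n\" continuation is trimmed:
 *
 *	codegen("\
 *		\n\
 *		static void example(void)	\n\
 *		{				\n\
 *		}				\n\
 *		");
 */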
static void print_hex(const char *data, int data_sz)
        for (i = 0, len = 0; i < data_sz; i++) {
                int w = data[i] ? 4 : 2;

                printf("\\x%02x", (unsigned char)data[i]);
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
        long page_sz = sysconf(_SC_PAGE_SIZE);

        map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
        map_sz = roundup(map_sz, page_sz);
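/* Worked example (hypothetical numbers): value_size = 12 and max_entries = 1
 * gives roundup(12, 8) * 1 = 16 bytes, which is then rounded up to a full
 * page, so on a system with 4096-byte pages the mmap size is 4096.
 */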
/* Emit type size asserts for all top-level fields in memory-mapped internal maps. */
static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
        struct btf *btf = bpf_object__btf(obj);
        struct btf_var_secinfo *sec_var;
        const struct btf_type *sec;
        char map_ident[256], var_ident[256];

                __attribute__((unused)) static void \n\
                %1$s__assert(struct %1$s *s __attribute__((unused))) \n\
                #ifdef __cplusplus \n\
                #define _Static_assert static_assert \n\
        bpf_object__for_each_map(map, obj) {
                if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))

                sec = find_type_for_map(btf, map_ident);
                /* best effort, couldn't find the type for this map */

                sec_var = btf_var_secinfos(sec);
                vlen = btf_vlen(sec);

                for (i = 0; i < vlen; i++, sec_var++) {
                        const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
                        const char *var_name = btf__name_by_offset(btf, var->name_off);

                        /* static variables are not exposed through BPF skeleton */
                        if (btf_var(var)->linkage == BTF_VAR_STATIC)

                        var_size = btf__resolve_size(btf, var->type);

                        strncat(var_ident, var_name, sizeof(var_ident) - 1);
                        sanitize_identifier(var_ident);
519 printf("\t_Static_assert(sizeof(s->%s->%s) == %ld, \"unexpected size of '%s'\");\n",
520 map_ident, var_ident, var_size, var_ident);
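                        /* Illustrative output for a hypothetical 8-byte
                         * variable "cnt" in the .data section:
                         *   _Static_assert(sizeof(s->data->cnt) == 8, "unexpected size of 'cnt'");
                         */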
                #ifdef __cplusplus \n\
                #undef _Static_assert \n\
static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
        struct bpf_program *prog;

        bpf_object__for_each_program(prog, obj) {
                static inline int \n\
                %1$s__%2$s__attach(struct %1$s *skel) \n\
                        int prog_fd = skel->progs.%2$s.prog_fd; \n\
                ", obj_name, bpf_program__name(prog));
                switch (bpf_program__type(prog)) {
                case BPF_PROG_TYPE_RAW_TRACEPOINT:
                        tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
                        printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
                case BPF_PROG_TYPE_TRACING:
                case BPF_PROG_TYPE_LSM:
                        if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
                                printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
                                printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
                        printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");

                        skel->links.%1$s_fd = fd; \n\
                ", bpf_program__name(prog));
        static inline int \n\
        %1$s__attach(struct %1$s *skel) \n\

        bpf_object__for_each_program(prog, obj) {
                        ret = ret < 0 ? ret : %1$s__%2$s__attach(skel); \n\
                ", obj_name, bpf_program__name(prog));

                return ret < 0 ? ret : 0; \n\

        static inline void \n\
        %1$s__detach(struct %1$s *skel) \n\

        bpf_object__for_each_program(prog, obj) {
                        skel_closenz(skel->links.%1$s_fd); \n\
                ", bpf_program__name(prog));
static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
        struct bpf_program *prog;

        %1$s__destroy(struct %1$s *skel) \n\
                %1$s__detach(skel); \n\

        bpf_object__for_each_program(prog, obj) {
                        skel_closenz(skel->progs.%1$s.prog_fd); \n\
                ", bpf_program__name(prog));

        bpf_object__for_each_map(map, obj) {
                if (!get_map_ident(map, ident, sizeof(ident)))
                if (bpf_map__is_internal(map) &&
                    (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
                        printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zd);\n",
                               ident, bpf_map_mmap_sz(map));
                        skel_closenz(skel->maps.%1$s.map_fd); \n\
static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
        DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);

        err = bpf_object__gen_loader(obj, &opts);

        err = bpf_object__load(obj);
                p_err("failed to load object file");
        /* If there was no error during load then gen_loader_opts
         * is populated with the loader program.
         */

        /* finish generating 'struct skel' */
        codegen_attach_detach(obj, obj_name);

        codegen_destroy(obj, obj_name);

        static inline struct %1$s * \n\
                struct %1$s *skel; \n\

                skel = skel_alloc(sizeof(*skel)); \n\

                skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
        obj_name, opts.data_sz);
        bpf_object__for_each_map(map, obj) {
                const void *mmap_data = NULL;
                size_t mmap_size = 0;

                if (!is_internal_mmapable_map(map, ident, sizeof(ident)))

                        skel->%1$s = skel_prep_map_data((void *)\"\\ \n\
                mmap_data = bpf_map__initial_value(map, &mmap_size);
                print_hex(mmap_data, mmap_size);
                        \", %1$zd, %2$zd); \n\
                        skel->maps.%3$s.initial_value = (__u64) (long) skel->%3$s;\n\
                ", bpf_map_mmap_sz(map), mmap_size, ident);
                        %1$s__destroy(skel); \n\

        static inline int \n\
        %1$s__load(struct %1$s *skel) \n\
                struct bpf_load_and_run_opts opts = {}; \n\

                opts.ctx = (struct bpf_loader_ctx *)skel; \n\
                opts.data_sz = %2$d; \n\
                opts.data = (void *)\"\\ \n\
        obj_name, opts.data_sz);
        print_hex(opts.data, opts.data_sz);

                opts.insns_sz = %d; \n\
                opts.insns = (void *)\"\\ \n\
        print_hex(opts.insns, opts.insns_sz);

                err = bpf_load_and_run(&opts); \n\
        bpf_object__for_each_map(map, obj) {
                const char *mmap_flags;

                if (!is_internal_mmapable_map(map, ident, sizeof(ident)))

                if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
                        mmap_flags = "PROT_READ";
                        mmap_flags = "PROT_READ | PROT_WRITE";

                        skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value, \n\
                        %2$zd, %3$s, skel->maps.%1$s.map_fd);\n\
                ident, bpf_map_mmap_sz(map), mmap_flags);
        static inline struct %1$s * \n\
        %1$s__open_and_load(void) \n\
                struct %1$s *skel; \n\

                skel = %1$s__open(); \n\
                if (%1$s__load(skel)) { \n\
                        %1$s__destroy(skel); \n\

        codegen_asserts(obj, obj_name);
codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
                s->map_cnt = %zu; \n\
                s->map_skel_sz = sizeof(*s->maps); \n\
                s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\

        bpf_object__for_each_map(map, obj) {
                if (!get_map_ident(map, ident, sizeof(ident)))

                        s->maps[%zu].name = \"%s\"; \n\
                        s->maps[%zu].map = &obj->maps.%s; \n\
                i, bpf_map__name(map), i, ident);

                /* memory-mapped internal maps */
                if (mmaped && is_internal_mmapable_map(map, ident, sizeof(ident))) {
                        printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_links)
        struct bpf_program *prog;

                s->prog_cnt = %zu; \n\
                s->prog_skel_sz = sizeof(*s->progs); \n\
                s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\

        bpf_object__for_each_program(prog, obj) {
                        s->progs[%1$zu].name = \"%2$s\"; \n\
                        s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
                i, bpf_program__name(prog));

                if (populate_links) {
                                s->progs[%1$zu].link = &obj->links.%2$s;\n\
                        i, bpf_program__name(prog));
static int do_skeleton(int argc, char **argv)
        char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
        size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
        DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
        char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
        struct bpf_object *obj = NULL;
        struct bpf_program *prog;

        if (is_prefix(*argv, "name")) {

                if (obj_name[0] != '\0') {
                        p_err("object name already specified");

                strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
                obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
                p_err("unknown arg %s", *argv);

        p_err("extra unknown arguments");
        if (stat(file, &st)) {
                p_err("failed to stat() %s: %s", file, strerror(errno));
        file_sz = st.st_size;
        mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
        fd = open(file, O_RDONLY);
                p_err("failed to open() %s: %s", file, strerror(errno));
        obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
        if (obj_data == MAP_FAILED) {
                p_err("failed to mmap() %s: %s", file, strerror(errno));
        if (obj_name[0] == '\0')
                get_obj_name(obj_name, file);
        opts.object_name = obj_name;

        /* log_level1 + log_level2 + stats, but not stable UAPI */
        opts.kernel_log_level = 1 + 2 + 4;
        obj = bpf_object__open_mem(obj_data, file_sz, &opts);

                libbpf_strerror(err, err_buf, sizeof(err_buf));
                p_err("failed to open BPF object file: %s", err_buf);
        bpf_object__for_each_map(map, obj) {
                if (!get_map_ident(map, ident, sizeof(ident))) {
                        p_err("ignoring unrecognized internal map '%s'...",

        bpf_object__for_each_program(prog, obj) {

        get_header_guard(header_guard, obj_name, "SKEL_H");
                /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
                /* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
                #include <bpf/skel_internal.h> \n\
                        struct bpf_loader_ctx ctx; \n\
        obj_name, header_guard

                /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\

                /* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
                #include <errno.h> \n\
                #include <stdlib.h> \n\
                #include <bpf/libbpf.h> \n\
                        struct bpf_object_skeleton *skeleton; \n\
                        struct bpf_object *obj; \n\
        obj_name, header_guard
1037 printf("\tstruct {\n");
1038 bpf_object__for_each_map(map, obj) {
1039 if (!get_map_ident(map, ident, sizeof(ident)))
1042 printf("\t\tstruct bpf_map_desc %s;\n", ident);
1044 printf("\t\tstruct bpf_map *%s;\n", ident);
1046 printf("\t} maps;\n");
1050 printf("\tstruct {\n");
1051 bpf_object__for_each_program(prog, obj) {
1053 printf("\t\tstruct bpf_prog_desc %s;\n",
1054 bpf_program__name(prog));
1056 printf("\t\tstruct bpf_program *%s;\n",
1057 bpf_program__name(prog));
1059 printf("\t} progs;\n");
1060 printf("\tstruct {\n");
1061 bpf_object__for_each_program(prog, obj) {
1063 printf("\t\tint %s_fd;\n",
1064 bpf_program__name(prog));
1066 printf("\t\tstruct bpf_link *%s;\n",
1067 bpf_program__name(prog));
1069 printf("\t} links;\n");
        btf = bpf_object__btf(obj);
                err = codegen_datasecs(obj, obj_name);

                err = gen_trace(obj, obj_name, header_guard);
                #ifdef __cplusplus \n\
                        static inline struct %1$s *open(const struct bpf_object_open_opts *opts = nullptr);\n\
                        static inline struct %1$s *open_and_load(); \n\
                        static inline int load(struct %1$s *skel); \n\
                        static inline int attach(struct %1$s *skel); \n\
                        static inline void detach(struct %1$s *skel); \n\
                        static inline void destroy(struct %1$s *skel); \n\
                        static inline const void *elf_bytes(size_t *sz); \n\
                #endif /* __cplusplus */ \n\
                %1$s__destroy(struct %1$s *obj) \n\
                        if (obj->skeleton) \n\
                                bpf_object__destroy_skeleton(obj->skeleton);\n\
                static inline int \n\
                %1$s__create_skeleton(struct %1$s *obj); \n\

                static inline struct %1$s * \n\
                %1$s__open_opts(const struct bpf_object_open_opts *opts) \n\
                        struct %1$s *obj; \n\

                        obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
                        err = %1$s__create_skeleton(obj); \n\
                        err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
                        %1$s__destroy(obj); \n\

                static inline struct %1$s * \n\
                %1$s__open(void) \n\
                        return %1$s__open_opts(NULL); \n\
                static inline int \n\
                %1$s__load(struct %1$s *obj) \n\
                        return bpf_object__load_skeleton(obj->skeleton); \n\

                static inline struct %1$s * \n\
                %1$s__open_and_load(void) \n\
                        struct %1$s *obj; \n\

                        obj = %1$s__open(); \n\
                        err = %1$s__load(obj); \n\
                                %1$s__destroy(obj); \n\

                static inline int \n\
                %1$s__attach(struct %1$s *obj) \n\
                        return bpf_object__attach_skeleton(obj->skeleton); \n\

                static inline void \n\
                %1$s__detach(struct %1$s *obj) \n\
                        bpf_object__detach_skeleton(obj->skeleton); \n\

                static inline const void *%1$s__elf_bytes(size_t *sz); \n\
                static inline int \n\
                %1$s__create_skeleton(struct %1$s *obj) \n\
                        struct bpf_object_skeleton *s; \n\

                        s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
                        s->sz = sizeof(*s); \n\
                        s->name = \"%1$s\"; \n\
                        s->obj = &obj->obj; \n\

        codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/);
        codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);

                        s->data = (void *)%2$s__elf_bytes(&s->data_sz); \n\

                        obj->skeleton = s; \n\
                        bpf_object__destroy_skeleton(s); \n\

                static inline const void *%2$s__elf_bytes(size_t *sz) \n\
                        return (const void *)\"\\ \n\
        , file_sz, obj_name);
        /* embed contents of BPF object file */
        print_hex(obj_data, file_sz);
                #ifdef __cplusplus \n\
                struct %1$s *%1$s::open(const struct bpf_object_open_opts *opts) { return %1$s__open_opts(opts); }\n\
                struct %1$s *%1$s::open_and_load() { return %1$s__open_and_load(); } \n\
                int %1$s::load(struct %1$s *skel) { return %1$s__load(skel); } \n\
                int %1$s::attach(struct %1$s *skel) { return %1$s__attach(skel); } \n\
                void %1$s::detach(struct %1$s *skel) { %1$s__detach(skel); } \n\
                void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); } \n\
                const void *%1$s::elf_bytes(size_t *sz) { return %1$s__elf_bytes(sz); } \n\
                #endif /* __cplusplus */ \n\
        codegen_asserts(obj, obj_name);

                #endif /* %1$s */ \n\

        bpf_object__close(obj);
        munmap(obj_data, mmap_sz);
/* Subskeletons are like skeletons, except they don't own the bpf_object,
 * associated maps, links, etc. Instead, they know about the existence of
 * variables, maps, programs and are able to find their locations
 * _at runtime_ from an already loaded bpf_object.
 *
 * This allows for library-like BPF objects to have userspace counterparts
 * with access to their own items without having to know anything about the
 * final BPF object that the library was linked into.
 */
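/* Illustrative invocation (matching the usage text in do_help() below, with
 * hypothetical file names):
 *
 *   bpftool gen subskeleton lib.bpf.o name lib > lib.subskel.h
 */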
static int do_subskeleton(int argc, char **argv)
        char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SUBSKEL_H__")];
        size_t i, len, file_sz, map_cnt = 0, prog_cnt = 0, mmap_sz, var_cnt = 0, var_idx = 0;
        DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
        char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
        struct bpf_object *obj = NULL;
        const char *file, *var_name;
        int fd, err = -1, map_type_id;
        const struct bpf_map *map;
        struct bpf_program *prog;
        const struct btf_type *map_type, *var_type;
        const struct btf_var_secinfo *var;

        if (is_prefix(*argv, "name")) {

                if (obj_name[0] != '\0') {
                        p_err("object name already specified");

                strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
                obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
                p_err("unknown arg %s", *argv);

        p_err("extra unknown arguments");
1326 p_err("cannot use loader for subskeletons");
1330 if (stat(file, &st)) {
1331 p_err("failed to stat() %s: %s", file, strerror(errno));
1334 file_sz = st.st_size;
1335 mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
1336 fd = open(file, O_RDONLY);
1338 p_err("failed to open() %s: %s", file, strerror(errno));
1341 obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
1342 if (obj_data == MAP_FAILED) {
1344 p_err("failed to mmap() %s: %s", file, strerror(errno));
1347 if (obj_name[0] == '\0')
1348 get_obj_name(obj_name, file);
        /* The empty object name allows us to use bpf_map__name and produce
         * ELF section names out of it. (".data" instead of "obj.data")
         */
        opts.object_name = "";
        obj = bpf_object__open_mem(obj_data, file_sz, &opts);
                libbpf_strerror(errno, err_buf, sizeof(err_buf));
                p_err("failed to open BPF object file: %s", err_buf);
        btf = bpf_object__btf(obj);
                p_err("need btf type information for %s", obj_name);

        bpf_object__for_each_program(prog, obj) {
        /* First, count how many variables we have to find.
         * We need this in advance so the subskel can allocate the right
         * amount of storage.
         */
        bpf_object__for_each_map(map, obj) {
                if (!get_map_ident(map, ident, sizeof(ident)))

                /* Also count all maps that have a name */
                if (!is_internal_mmapable_map(map, ident, sizeof(ident)))

                map_type_id = bpf_map__btf_value_type_id(map);
                if (map_type_id <= 0) {

                map_type = btf__type_by_id(btf, map_type_id);
                var = btf_var_secinfos(map_type);
                len = btf_vlen(map_type);
                for (i = 0; i < len; i++, var++) {
                        var_type = btf__type_by_id(btf, var->type);

                        if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
        get_header_guard(header_guard, obj_name, "SUBSKEL_H");

                /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\

                /* THIS FILE IS AUTOGENERATED! */ \n\
                #include <errno.h> \n\
                #include <stdlib.h> \n\
                #include <bpf/libbpf.h> \n\
                        struct bpf_object *obj; \n\
                        struct bpf_object_subskeleton *subskel; \n\
        ", obj_name, header_guard);
1427 printf("\tstruct {\n");
1428 bpf_object__for_each_map(map, obj) {
1429 if (!get_map_ident(map, ident, sizeof(ident)))
1431 printf("\t\tstruct bpf_map *%s;\n", ident);
1433 printf("\t} maps;\n");
1437 printf("\tstruct {\n");
1438 bpf_object__for_each_program(prog, obj) {
1439 printf("\t\tstruct bpf_program *%s;\n",
1440 bpf_program__name(prog));
1442 printf("\t} progs;\n");
        err = codegen_subskel_datasecs(obj, obj_name);

        /* emit code that will allocate enough storage for all symbols */
                #ifdef __cplusplus \n\
                        static inline struct %1$s *open(const struct bpf_object *src);\n\
                        static inline void destroy(struct %1$s *skel); \n\
                #endif /* __cplusplus */ \n\

                static inline void \n\
                %1$s__destroy(struct %1$s *skel) \n\
                        if (skel->subskel) \n\
                                bpf_object__destroy_subskeleton(skel->subskel);\n\

                static inline struct %1$s * \n\
                %1$s__open(const struct bpf_object *src) \n\
                        struct %1$s *obj; \n\
                        struct bpf_object_subskeleton *s; \n\

                        obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
                        s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\
                        s->sz = sizeof(*s); \n\
                        s->var_skel_sz = sizeof(*s->vars); \n\
                        obj->subskel = s; \n\

                        s->var_cnt = %2$d; \n\
                        s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\
        /* walk through each symbol and emit the runtime representation */
        bpf_object__for_each_map(map, obj) {
                if (!is_internal_mmapable_map(map, ident, sizeof(ident)))

                map_type_id = bpf_map__btf_value_type_id(map);
                if (map_type_id <= 0)
                        /* skip over internal maps with no type */

                map_type = btf__type_by_id(btf, map_type_id);
                var = btf_var_secinfos(map_type);
                len = btf_vlen(map_type);
                for (i = 0; i < len; i++, var++) {
                        var_type = btf__type_by_id(btf, var->type);
                        var_name = btf__name_by_offset(btf, var_type->name_off);

                        if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
                        /* Note that we use the dot prefix in .data as the
                         * field access operator, i.e. maps%s becomes maps.data
                         */
                                s->vars[%3$d].name = \"%1$s\"; \n\
                                s->vars[%3$d].map = &obj->maps.%2$s; \n\
                                s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s;\n\
                        ", var_name, ident, var_idx);
        codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/);
        codegen_progs_skeleton(obj, prog_cnt, false /*links*/);

                        err = bpf_object__open_subskeleton(s); \n\
                                %1$s__destroy(obj); \n\

                #ifdef __cplusplus \n\
                struct %1$s *%1$s::open(const struct bpf_object *src) { return %1$s__open(src); }\n\
                void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\
                #endif /* __cplusplus */ \n\

                #endif /* %2$s */ \n\
        ", obj_name, header_guard);

        bpf_object__close(obj);
        munmap(obj_data, mmap_sz);
static int do_object(int argc, char **argv)
        struct bpf_linker *linker;
        const char *output_file, *file;

        output_file = GET_ARG();

        linker = bpf_linker__new(output_file, NULL);
                p_err("failed to create BPF linker instance");

        err = bpf_linker__add_file(linker, file, NULL);
                p_err("failed to link '%s': %s (%d)", file, strerror(errno), errno);

        err = bpf_linker__finalize(linker);
                p_err("failed to finalize ELF file: %s (%d)", strerror(errno), errno);

        bpf_linker__free(linker);
static int do_help(int argc, char **argv)
                jsonw_null(json_wtr);

                "Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
                "       %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
                "       %1$s %2$s subskeleton FILE [name OBJECT_NAME]\n"
                "       %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
                "       " HELP_SPEC_OPTIONS " |\n"
                "                    {-L|--use-loader} }\n"
static int btf_save_raw(const struct btf *btf, const char *path)
        data = btf__raw_data(btf, &data_sz);

        f = fopen(path, "wb");

        if (fwrite(data, 1, data_sz, f) != data_sz)
struct btfgen_info {
        struct btf *src_btf;
        struct btf *marked_btf; /* btf structure used to mark used types */
static size_t btfgen_hash_fn(long key, void *ctx)

static bool btfgen_equal_fn(long k1, long k2, void *ctx)

static void btfgen_free_info(struct btfgen_info *info)
        btf__free(info->src_btf);
        btf__free(info->marked_btf);
static struct btfgen_info *
btfgen_new_info(const char *targ_btf_path)
        struct btfgen_info *info;

        info = calloc(1, sizeof(*info));

        info->src_btf = btf__parse(targ_btf_path, NULL);
        if (!info->src_btf) {
                p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
        info->marked_btf = btf__parse(targ_btf_path, NULL);
        if (!info->marked_btf) {
                p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));

        btfgen_free_info(info);
#define MARKED UINT32_MAX

static void btfgen_mark_member(struct btfgen_info *info, int type_id, int idx)
        const struct btf_type *t = btf__type_by_id(info->marked_btf, type_id);
        struct btf_member *m = btf_members(t) + idx;

        m->name_off = MARKED;
btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_pointers)
        const struct btf_type *btf_type = btf__type_by_id(info->src_btf, type_id);
        struct btf_type *cloned_type;
        struct btf_param *param;
        struct btf_array *array;

        /* mark type on cloned BTF as used */
        cloned_type = (struct btf_type *) btf__type_by_id(info->marked_btf, type_id);
        cloned_type->name_off = MARKED;

        /* recursively mark other types needed by it */
        switch (btf_kind(btf_type)) {
        case BTF_KIND_FLOAT:
        case BTF_KIND_ENUM64:
        case BTF_KIND_STRUCT:
        case BTF_KIND_UNION:
                if (follow_pointers) {
                        err = btfgen_mark_type(info, btf_type->type, follow_pointers);
        case BTF_KIND_CONST:
        case BTF_KIND_RESTRICT:
        case BTF_KIND_VOLATILE:
        case BTF_KIND_TYPEDEF:
                err = btfgen_mark_type(info, btf_type->type, follow_pointers);
        case BTF_KIND_ARRAY:
                array = btf_array(btf_type);

                /* mark array type */
                err = btfgen_mark_type(info, array->type, follow_pointers);
                /* mark array's index type */
                err = err ? : btfgen_mark_type(info, array->index_type, follow_pointers);
        case BTF_KIND_FUNC_PROTO:
                err = btfgen_mark_type(info, btf_type->type, follow_pointers);

                /* mark parameter types */
                param = btf_params(btf_type);
                for (i = 0; i < btf_vlen(btf_type); i++) {
                        err = btfgen_mark_type(info, param->type, follow_pointers);
        /* flag any other kind so we know it still needs to be handled */
                p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
        struct btf *btf = info->src_btf;
        const struct btf_type *btf_type;
        struct btf_member *btf_member;
        struct btf_array *array;
        unsigned int type_id = targ_spec->root_type_id;

        /* mark root type */
        btf_type = btf__type_by_id(btf, type_id);
        err = btfgen_mark_type(info, type_id, false);

        /* mark types for complex types (arrays, unions, structures) */
        for (int i = 1; i < targ_spec->raw_len; i++) {
                /* skip typedefs and mods */
                while (btf_is_mod(btf_type) || btf_is_typedef(btf_type)) {
                        type_id = btf_type->type;
                        btf_type = btf__type_by_id(btf, type_id);
                switch (btf_kind(btf_type)) {
                case BTF_KIND_STRUCT:
                case BTF_KIND_UNION:
                        idx = targ_spec->raw_spec[i];
                        btf_member = btf_members(btf_type) + idx;

                        btfgen_mark_member(info, type_id, idx);

                        /* mark member's type */
                        type_id = btf_member->type;
                        btf_type = btf__type_by_id(btf, type_id);
                        err = btfgen_mark_type(info, type_id, false);
                case BTF_KIND_ARRAY:
                        array = btf_array(btf_type);
                        type_id = array->type;
                        btf_type = btf__type_by_id(btf, type_id);
                        p_err("unsupported kind: %s (%d)",
                              btf_kind_str(btf_type), btf_type->type);
/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
 * this function does not rely on the target spec for inferring members, but
 * uses the associated BTF.
 *
 * The `behind_ptr` argument is used to stop marking of composite types reached
 * through a pointer. This way, we can keep BTF size in check while providing
 * reasonable match semantics.
 */
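/* Illustrative consequence of the behind_ptr rule: when matching a type like
 * "struct A { struct B *b; }", A and its members are marked, but B, reached
 * only through a pointer, is not expanded member-by-member.
 */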
static int btfgen_mark_type_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
        const struct btf_type *btf_type;
        struct btf *btf = info->src_btf;
        struct btf_type *cloned_type;

        btf_type = btf__type_by_id(btf, type_id);
        /* mark type on cloned BTF as used */
        cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
        cloned_type->name_off = MARKED;

        switch (btf_kind(btf_type)) {
        case BTF_KIND_FLOAT:
        case BTF_KIND_ENUM64:
        case BTF_KIND_STRUCT:
        case BTF_KIND_UNION: {
                struct btf_member *m = btf_members(btf_type);
                __u16 vlen = btf_vlen(btf_type);

                for (i = 0; i < vlen; i++, m++) {
                        btfgen_mark_member(info, type_id, i);

                        /* mark member's type */
                        err = btfgen_mark_type_match(info, m->type, false);
        case BTF_KIND_CONST:
        case BTF_KIND_RESTRICT:
        case BTF_KIND_TYPEDEF:
        case BTF_KIND_VOLATILE:
                return btfgen_mark_type_match(info, btf_type->type, behind_ptr);
                return btfgen_mark_type_match(info, btf_type->type, true);
        case BTF_KIND_ARRAY: {
                struct btf_array *array;

                array = btf_array(btf_type);
                /* mark array type */
                err = btfgen_mark_type_match(info, array->type, false);
                /* mark array's index type */
                err = err ? : btfgen_mark_type_match(info, array->index_type, false);
        case BTF_KIND_FUNC_PROTO: {
                __u16 vlen = btf_vlen(btf_type);
                struct btf_param *param;

                err = btfgen_mark_type_match(info, btf_type->type, false);

                /* mark parameter types */
                param = btf_params(btf_type);
                for (i = 0; i < vlen; i++) {
                        err = btfgen_mark_type_match(info, param->type, false);
        /* flag any other kind so we know it still needs to be handled */
                p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
 * this function does not rely on the target spec for inferring members, but
 * uses the associated BTF.
 */
static int btfgen_record_type_match_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
        return btfgen_mark_type_match(info, targ_spec->root_type_id, false);

static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
        return btfgen_mark_type(info, targ_spec->root_type_id, true);

static int btfgen_record_enumval_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
        return btfgen_mark_type(info, targ_spec->root_type_id, false);
static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *res)
        switch (res->relo_kind) {
        case BPF_CORE_FIELD_BYTE_OFFSET:
        case BPF_CORE_FIELD_BYTE_SIZE:
        case BPF_CORE_FIELD_EXISTS:
        case BPF_CORE_FIELD_SIGNED:
        case BPF_CORE_FIELD_LSHIFT_U64:
        case BPF_CORE_FIELD_RSHIFT_U64:
                return btfgen_record_field_relo(info, res);
        case BPF_CORE_TYPE_ID_LOCAL: /* BPF_CORE_TYPE_ID_LOCAL doesn't require kernel BTF */
        case BPF_CORE_TYPE_ID_TARGET:
        case BPF_CORE_TYPE_EXISTS:
        case BPF_CORE_TYPE_SIZE:
                return btfgen_record_type_relo(info, res);
        case BPF_CORE_TYPE_MATCHES:
                return btfgen_record_type_match_relo(info, res);
        case BPF_CORE_ENUMVAL_EXISTS:
        case BPF_CORE_ENUMVAL_VALUE:
                return btfgen_record_enumval_relo(info, res);
static struct bpf_core_cand_list *
btfgen_find_cands(const struct btf *local_btf, const struct btf *targ_btf, __u32 local_id)
        const struct btf_type *local_type;
        struct bpf_core_cand_list *cands = NULL;
        struct bpf_core_cand local_cand = {};
        size_t local_essent_len;
        const char *local_name;

        local_cand.btf = local_btf;
        local_cand.id = local_id;

        local_type = btf__type_by_id(local_btf, local_id);

        local_name = btf__name_by_offset(local_btf, local_type->name_off);

        local_essent_len = bpf_core_essential_name_len(local_name);

        cands = calloc(1, sizeof(*cands));

        err = bpf_core_add_cands(&local_cand, local_essent_len, targ_btf, "vmlinux", 1, cands);

        bpf_core_free_cands(cands);
/* Record relocation information for a single BPF object */
static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)
        const struct btf_ext_info_sec *sec;
        const struct bpf_core_relo *relo;
        const struct btf_ext_info *seg;
        struct hashmap_entry *entry;
        struct hashmap *cand_cache = NULL;
        struct btf_ext *btf_ext = NULL;
        unsigned int relo_idx;
        struct btf *btf = NULL;

        btf = btf__parse(obj_path, &btf_ext);
                p_err("failed to parse BPF object '%s': %s", obj_path, strerror(errno));

                p_err("failed to parse BPF object '%s': section %s not found",
                      obj_path, BTF_EXT_ELF_SEC);
        if (btf_ext->core_relo_info.len == 0) {

        cand_cache = hashmap__new(btfgen_hash_fn, btfgen_equal_fn, NULL);
        if (IS_ERR(cand_cache)) {
                err = PTR_ERR(cand_cache);

        seg = &btf_ext->core_relo_info;
        for_each_btf_ext_sec(seg, sec) {
                for_each_btf_ext_rec(seg, sec, relo_idx, relo) {
                        struct bpf_core_spec specs_scratch[3] = {};
                        struct bpf_core_relo_res targ_res = {};
                        struct bpf_core_cand_list *cands = NULL;
                        const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);

                        if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
                            !hashmap__find(cand_cache, relo->type_id, &cands)) {
                                cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);

                                err = hashmap__set(cand_cache, relo->type_id, cands,

                        err = bpf_core_calc_relo_insn(sec_name, relo, relo_idx, btf, cands,
                                                      specs_scratch, &targ_res);

                        /* specs_scratch[2] is the target spec */
                        err = btfgen_record_reloc(info, &specs_scratch[2]);

        btf_ext__free(btf_ext);

        if (!IS_ERR_OR_NULL(cand_cache)) {
                hashmap__for_each_entry(cand_cache, entry, i) {
                        bpf_core_free_cands(entry->pvalue);
                hashmap__free(cand_cache);
static int btfgen_remap_id(__u32 *type_id, void *ctx)
        unsigned int *ids = ctx;

        *type_id = ids[*type_id];
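/* Illustrative example (hypothetical ids): if source type id 42 was cloned
 * into the new BTF as id 3, then ids[42] == 3 and every reference to 42 is
 * rewritten to 3 during the second pass below.
 */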
/* Generate BTF from relocation information previously recorded */
static struct btf *btfgen_get_btf(struct btfgen_info *info)
        struct btf *btf_new = NULL;
        unsigned int *ids = NULL;
        unsigned int i, n = btf__type_cnt(info->marked_btf);

        btf_new = btf__new_empty();

        ids = calloc(n, sizeof(*ids));
        /* first pass: add all marked types to btf_new and add their new ids to the ids map */
        for (i = 1; i < n; i++) {
                const struct btf_type *cloned_type, *type;

                cloned_type = btf__type_by_id(info->marked_btf, i);

                if (cloned_type->name_off != MARKED)

                type = btf__type_by_id(info->src_btf, i);

                /* add members for struct and union */
                if (btf_is_composite(type)) {
                        struct btf_member *cloned_m, *m;
                        unsigned short vlen;

                        name = btf__str_by_offset(info->src_btf, type->name_off);

                        if (btf_is_struct(type))
                                err = btf__add_struct(btf_new, name, type->size);
                                err = btf__add_union(btf_new, name, type->size);

                        cloned_m = btf_members(cloned_type);
                        m = btf_members(type);
                        vlen = btf_vlen(cloned_type);
                        for (idx_src = 0; idx_src < vlen; idx_src++, cloned_m++, m++) {
                                /* add only members that are marked as used */
                                if (cloned_m->name_off != MARKED)

                                name = btf__str_by_offset(info->src_btf, m->name_off);
                                err = btf__add_field(btf_new, name, m->type,
                                                     btf_member_bit_offset(cloned_type, idx_src),
                                                     btf_member_bitfield_size(cloned_type, idx_src));
                        err = btf__add_type(btf_new, info->src_btf, type);

                /* add ID mapping */

        /* second pass: fix up type ids */
        for (i = 1; i < btf__type_cnt(btf_new); i++) {
                struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);

                err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids);
/* Create minimized BTF file for a set of BPF objects.
 *
 * The BTFGen algorithm is divided into two main parts: (1) collect the
 * BTF types that are involved in relocations and (2) generate the BTF
 * object using the collected types.
 *
 * In order to collect the types involved in the relocations, we parse
 * the BTF and BTF.ext sections of the BPF objects and use
 * bpf_core_calc_relo_insn() to get the target specification, which
 * indicates how the types and fields are used in a relocation.
 *
 * Types are recorded in different ways according to the kind of the
 * relocation. For field-based relocations, only the members that are
 * actually used are saved in order to reduce the size of the generated
 * BTF file. For type-based relocations, empty structs/unions are
 * generated, and for enum-based relocations the whole type is saved.
 *
 * The second part of the algorithm generates the BTF object. It creates
 * an empty BTF object and fills it with the types recorded in the
 * previous step. It adds only the structure and union members that were
 * marked as used and also fixes up the type IDs on the generated BTF
 * object.
 */
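/* Illustrative invocation (matching the usage text in do_help() above, with
 * hypothetical file names):
 *
 *   bpftool gen min_core_btf vmlinux.btf min_core.btf prog1.bpf.o prog2.bpf.o
 */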
static int minimize_btf(const char *src_btf, const char *dst_btf, const char *objspaths[])
        struct btfgen_info *info;
        struct btf *btf_new = NULL;

        info = btfgen_new_info(src_btf);
                p_err("failed to allocate info structure: %s", strerror(errno));

        for (i = 0; objspaths[i] != NULL; i++) {
                err = btfgen_record_obj(info, objspaths[i]);
                        p_err("error recording relocations for %s: %s", objspaths[i],

        btf_new = btfgen_get_btf(info);
                p_err("error generating BTF: %s", strerror(errno));

        err = btf_save_raw(btf_new, dst_btf);
                p_err("error saving btf file: %s", strerror(errno));

        btfgen_free_info(info);
static int do_min_core_btf(int argc, char **argv)
        const char *input, *output, **objs;

        objs = (const char **) calloc(argc + 1, sizeof(*objs));
                p_err("failed to allocate array for object names");

        objs[i++] = GET_ARG();

        err = minimize_btf(input, output, objs);
static const struct cmd cmds[] = {
        { "object",       do_object },
        { "skeleton",     do_skeleton },
        { "subskeleton",  do_subskeleton },
        { "min_core_btf", do_min_core_btf },
        { "help",         do_help },

int do_gen(int argc, char **argv)
        return cmd_select(cmds, argc, argv, do_help);