tools/testing/selftests/bpf/prog_tests/map_btf.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <test_progs.h>

#include "normal_map_btf.skel.h"
#include "map_in_map_btf.skel.h"

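/* Both subtests stress the release of the btf object shared by a
 * skeleton's maps: a dup'ed map fd keeps one map alive after the
 * skeleton is destroyed, so that map ends up dropping the last
 * reference on the map btf from bpf_map_free_deferred().
 */
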
static void do_test_normal_map_btf(void)
{
        struct normal_map_btf *skel;
        int i, err, new_fd = -1;
        int map_fd_arr[64];

        skel = normal_map_btf__open_and_load();
        if (!ASSERT_OK_PTR(skel, "open_load"))
                return;

        err = normal_map_btf__attach(skel);
        if (!ASSERT_OK(err, "attach"))
                goto out;

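        /* Trigger the attached program; it is expected to set 'done'
         * once it observes this pid.
         */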
        skel->bss->pid = getpid();
        usleep(1);
        ASSERT_TRUE(skel->bss->done, "done");

        /* Use percpu_array maps to slow bpf_map_free_deferred() down.
         * The memory allocation may fail, so don't check the returned fd.
         */
        for (i = 0; i < ARRAY_SIZE(map_fd_arr); i++)
                map_fd_arr[i] = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, NULL, 4, 4, 256, NULL);

        /* Close the array fd later */
        new_fd = dup(bpf_map__fd(skel->maps.array));
out:
        normal_map_btf__destroy(skel);
        if (new_fd < 0)
                return;
        /* Use kern_sync_rcu() to wait for the bpf program to start being
         * freed, and use an assumed delay to wait for the release of the map
         * btf references held by the other maps (e.g., bss). After that, the
         * array map holds the last reference on the map btf.
         */
        kern_sync_rcu();
        usleep(4000);
        /* Spawn multiple kworkers to delay the invocation of
         * bpf_map_free_deferred() for the array map.
         */
        for (i = 0; i < ARRAY_SIZE(map_fd_arr); i++) {
                if (map_fd_arr[i] < 0)
                        continue;
                close(map_fd_arr[i]);
        }
        close(new_fd);
}

static void do_test_map_in_map_btf(void)
{
        int err, zero = 0, new_fd = -1;
        struct map_in_map_btf *skel;

        skel = map_in_map_btf__open_and_load();
        if (!ASSERT_OK_PTR(skel, "open_load"))
                return;

        err = map_in_map_btf__attach(skel);
        if (!ASSERT_OK(err, "attach"))
                goto out;

        skel->bss->pid = getpid();
        usleep(1);
        ASSERT_TRUE(skel->bss->done, "done");

        /* Close the inner_array fd later */
        new_fd = dup(bpf_map__fd(skel->maps.inner_array));
        /* Defer the free of inner_array */
        err = bpf_map__delete_elem(skel->maps.outer_array, &zero, sizeof(zero), 0);
        ASSERT_OK(err, "delete inner map");
out:
        map_in_map_btf__destroy(skel);
        if (new_fd < 0)
                return;
        /* Use kern_sync_rcu() to wait for the bpf program to start being
         * freed, and use an assumed delay to wait for the free of the outer
         * map and the release of the map btf. After that, the inner map holds
         * the last reference on the map btf.
         */
        kern_sync_rcu();
        usleep(10000);
        close(new_fd);
}

void test_map_btf(void)
{
        if (test__start_subtest("array_btf"))
                do_test_normal_map_btf();
        if (test__start_subtest("inner_array_btf"))
                do_test_map_in_map_btf();
}