// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
#include "for_each_hash_map_elem.skel.h"
#include "for_each_array_map_elem.skel.h"
#include "for_each_map_elem_write_key.skel.h"
#include "for_each_multi_maps.skel.h"

static unsigned int duration;

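/*
 * Populate the hashmap and the per-CPU map from user space, run the
 * skeleton's test_pkt_access program once, and verify the counters that
 * the bpf_for_each_map_elem() callbacks accumulated in .bss.
 */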
static void test_hash_map(void)
{
	int i, err, max_entries;
	struct for_each_hash_map_elem *skel;
	__u64 *percpu_valbuf = NULL;
	size_t percpu_val_sz;
	__u32 key, num_cpus;
	__u64 val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	skel = for_each_hash_map_elem__open_and_load();
	if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load"))
		return;

	max_entries = bpf_map__max_entries(skel->maps.hashmap);
	for (i = 0; i < max_entries; i++) {
		key = i;
		val = i + 1;
		err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	num_cpus = bpf_num_possible_cpus();
	percpu_val_sz = sizeof(__u64) * num_cpus;
	percpu_valbuf = malloc(percpu_val_sz);
	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
		goto out;

	key = 1;
	for (i = 0; i < num_cpus; i++)
		percpu_valbuf[i] = i + 1;
	err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
				   percpu_valbuf, percpu_val_sz, BPF_ANY);
	if (!ASSERT_OK(err, "percpu_map_update"))
		goto out;

	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	duration = topts.duration;
	if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
		  err, errno, topts.retval))
		goto out;

	ASSERT_EQ(skel->bss->hashmap_output, 4, "hashmap_output");
	ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems");

	key = 1;
	err = bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key), &val, sizeof(val), 0);
	ASSERT_ERR(err, "hashmap_lookup");

	ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
	ASSERT_LT(skel->bss->cpu, num_cpus, "num_cpus");
	ASSERT_EQ(skel->bss->percpu_map_elems, 1, "percpu_map_elems");
	ASSERT_EQ(skel->bss->percpu_key, 1, "percpu_key");
	ASSERT_EQ(skel->bss->percpu_val, skel->bss->cpu + 1, "percpu_val");
	ASSERT_EQ(skel->bss->percpu_output, 100, "percpu_output");
out:
	free(percpu_valbuf);
	for_each_hash_map_elem__destroy(skel);
}

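/*
 * Same flow as test_hash_map(), but iterating an array map: expected_total
 * deliberately skips the last element (see the inline comment below) and is
 * compared against the arraymap_output value computed by the BPF program.
 */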
static void test_array_map(void)
{
	__u32 key, num_cpus, max_entries;
	int i, err;
	struct for_each_array_map_elem *skel;
	__u64 *percpu_valbuf = NULL;
	size_t percpu_val_sz;
	__u64 val, expected_total;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	skel = for_each_array_map_elem__open_and_load();
	if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load"))
		return;

	expected_total = 0;
	max_entries = bpf_map__max_entries(skel->maps.arraymap);
	for (i = 0; i < max_entries; i++) {
		key = i;
		val = i + 1;
		/* skip the last iteration for expected total */
		if (i != max_entries - 1)
			expected_total += val;
		err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	num_cpus = bpf_num_possible_cpus();
	percpu_val_sz = sizeof(__u64) * num_cpus;
	percpu_valbuf = malloc(percpu_val_sz);
	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
		goto out;

	key = 0;
	for (i = 0; i < num_cpus; i++)
		percpu_valbuf[i] = i + 1;
	err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
				   percpu_valbuf, percpu_val_sz, BPF_ANY);
	if (!ASSERT_OK(err, "percpu_map_update"))
		goto out;

	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	duration = topts.duration;
	if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
		  err, errno, topts.retval))
		goto out;

	ASSERT_EQ(skel->bss->arraymap_output, expected_total, "array_output");
	ASSERT_EQ(skel->bss->cpu + 1, skel->bss->percpu_val, "percpu_val");

out:
	free(percpu_valbuf);
	for_each_array_map_elem__destroy(skel);
}

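/*
 * Negative test: the for_each_map_elem_write_key program writes to the
 * callback's map key, so it is expected to be rejected at load time.
 * If it unexpectedly loads, destroy the skeleton to avoid a leak.
 */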
static void test_write_map_key(void)
{
	struct for_each_map_elem_write_key *skel;

	skel = for_each_map_elem_write_key__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "for_each_map_elem_write_key__open_and_load"))
		for_each_map_elem_write_key__destroy(skel);
}

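/*
 * Populate both an array map and a hash map, then run the program twice,
 * toggling use_array to select which map the BPF side iterates, and check
 * data_output against the corresponding user-space total.
 */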
static void test_multi_maps(void)
{
	struct for_each_multi_maps *skel;
	__u64 val, array_total, hash_total;
	__u32 key, max_entries;
	int i, err;

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	skel = for_each_multi_maps__open_and_load();
	if (!ASSERT_OK_PTR(skel, "for_each_multi_maps__open_and_load"))
		return;

	array_total = 0;
	max_entries = bpf_map__max_entries(skel->maps.arraymap);
	for (i = 0; i < max_entries; i++) {
		key = i;
		val = i + 1;
		array_total += val;
		err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "array_map_update"))
			goto out;
	}

	hash_total = 0;
	max_entries = bpf_map__max_entries(skel->maps.hashmap);
	for (i = 0; i < max_entries; i++) {
		key = i + 100;
		val = i + 1;
		hash_total += val;
		err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "hash_map_update"))
			goto out;
	}

	skel->bss->data_output = 0;
	skel->bss->use_array = 1;
	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	ASSERT_OK(err, "bpf_prog_test_run_opts");
	ASSERT_OK(topts.retval, "retval");
	ASSERT_EQ(skel->bss->data_output, array_total, "array output");

	skel->bss->data_output = 0;
	skel->bss->use_array = 0;
	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	ASSERT_OK(err, "bpf_prog_test_run_opts");
	ASSERT_OK(topts.retval, "retval");
	ASSERT_EQ(skel->bss->data_output, hash_total, "hash output");

out:
	for_each_multi_maps__destroy(skel);
}

void test_for_each(void)
{
	if (test__start_subtest("hash_map"))
		test_hash_map();
	if (test__start_subtest("array_map"))
		test_array_map();
	if (test__start_subtest("write_map_key"))
		test_write_map_key();
	if (test__start_subtest("multi_maps"))
		test_multi_maps();
}