/* tools/testing/selftests/bpf/prog_tests/mmap.c */
1 // SPDX-License-Identifier: GPL-2.0
2 #include <test_progs.h>
3 #include <sys/mman.h>
4 #include "test_mmap.skel.h"
5
/* Value layout mirrored over the mmap()'ed data_map: 512 * 4 = 2048 u64s
 * (16KB), matching the "at least 4 pages of data" sizing applied to
 * data_map below (assumes 4KB pages — larger pages still round up fine).
 */
struct map_data {
	__u64 val[512 * 4];
};
9
/* Round @sz up to the next multiple of the system page size.
 * Returns @sz unchanged when it is already page-aligned (including 0).
 */
static size_t roundup_page(size_t sz)
{
	const size_t page_size = sysconf(_SC_PAGE_SIZE);
	size_t rem = sz % page_size;

	return rem ? sz + (page_size - rem) : sz;
}
15
16 void test_mmap(void)
17 {
18         const size_t bss_sz = roundup_page(sizeof(struct test_mmap__bss));
19         const size_t map_sz = roundup_page(sizeof(struct map_data));
20         const int zero = 0, one = 1, two = 2, far = 1500;
21         const long page_size = sysconf(_SC_PAGE_SIZE);
22         int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd;
23         struct bpf_map *data_map, *bss_map;
24         void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp0, *tmp1, *tmp2;
25         struct test_mmap__bss *bss_data;
26         struct bpf_map_info map_info;
27         __u32 map_info_sz = sizeof(map_info);
28         struct map_data *map_data;
29         struct test_mmap *skel;
30         __u64 val = 0;
31
32         skel = test_mmap__open();
33         if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
34                 return;
35
36         err = bpf_map__set_max_entries(skel->maps.rdonly_map, page_size);
37         if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
38                 goto cleanup;
39
40         /* at least 4 pages of data */
41         err = bpf_map__set_max_entries(skel->maps.data_map,
42                                        4 * (page_size / sizeof(u64)));
43         if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
44                 goto cleanup;
45
46         err = test_mmap__load(skel);
47         if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
48                 goto cleanup;
49
50         bss_map = skel->maps.bss;
51         data_map = skel->maps.data_map;
52         data_map_fd = bpf_map__fd(data_map);
53
54         rdmap_fd = bpf_map__fd(skel->maps.rdonly_map);
55         tmp1 = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
56         if (CHECK(tmp1 != MAP_FAILED, "rdonly_write_mmap", "unexpected success\n")) {
57                 munmap(tmp1, page_size);
58                 goto cleanup;
59         }
60         /* now double-check if it's mmap()'able at all */
61         tmp1 = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rdmap_fd, 0);
62         if (CHECK(tmp1 == MAP_FAILED, "rdonly_read_mmap", "failed: %d\n", errno))
63                 goto cleanup;
64
65         /* get map's ID */
66         memset(&map_info, 0, map_info_sz);
67         err = bpf_map_get_info_by_fd(data_map_fd, &map_info, &map_info_sz);
68         if (CHECK(err, "map_get_info", "failed %d\n", errno))
69                 goto cleanup;
70         data_map_id = map_info.id;
71
72         /* mmap BSS map */
73         bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
74                           bpf_map__fd(bss_map), 0);
75         if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap",
76                   ".bss mmap failed: %d\n", errno)) {
77                 bss_mmaped = NULL;
78                 goto cleanup;
79         }
80         /* map as R/W first */
81         map_mmaped = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
82                           data_map_fd, 0);
83         if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
84                   "data_map mmap failed: %d\n", errno)) {
85                 map_mmaped = NULL;
86                 goto cleanup;
87         }
88
89         bss_data = bss_mmaped;
90         map_data = map_mmaped;
91
92         CHECK_FAIL(bss_data->in_val);
93         CHECK_FAIL(bss_data->out_val);
94         CHECK_FAIL(skel->bss->in_val);
95         CHECK_FAIL(skel->bss->out_val);
96         CHECK_FAIL(map_data->val[0]);
97         CHECK_FAIL(map_data->val[1]);
98         CHECK_FAIL(map_data->val[2]);
99         CHECK_FAIL(map_data->val[far]);
100
101         err = test_mmap__attach(skel);
102         if (CHECK(err, "attach_raw_tp", "err %d\n", err))
103                 goto cleanup;
104
105         bss_data->in_val = 123;
106         val = 111;
107         CHECK_FAIL(bpf_map_update_elem(data_map_fd, &zero, &val, 0));
108
109         usleep(1);
110
111         CHECK_FAIL(bss_data->in_val != 123);
112         CHECK_FAIL(bss_data->out_val != 123);
113         CHECK_FAIL(skel->bss->in_val != 123);
114         CHECK_FAIL(skel->bss->out_val != 123);
115         CHECK_FAIL(map_data->val[0] != 111);
116         CHECK_FAIL(map_data->val[1] != 222);
117         CHECK_FAIL(map_data->val[2] != 123);
118         CHECK_FAIL(map_data->val[far] != 3 * 123);
119
120         CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &zero, &val));
121         CHECK_FAIL(val != 111);
122         CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &one, &val));
123         CHECK_FAIL(val != 222);
124         CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &two, &val));
125         CHECK_FAIL(val != 123);
126         CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &far, &val));
127         CHECK_FAIL(val != 3 * 123);
128
129         /* data_map freeze should fail due to R/W mmap() */
130         err = bpf_map_freeze(data_map_fd);
131         if (CHECK(!err || errno != EBUSY, "no_freeze",
132                   "data_map freeze succeeded: err=%d, errno=%d\n", err, errno))
133                 goto cleanup;
134
135         err = mprotect(map_mmaped, map_sz, PROT_READ);
136         if (CHECK(err, "mprotect_ro", "mprotect to r/o failed %d\n", errno))
137                 goto cleanup;
138
139         /* unmap R/W mapping */
140         err = munmap(map_mmaped, map_sz);
141         map_mmaped = NULL;
142         if (CHECK(err, "data_map_munmap", "data_map munmap failed: %d\n", errno))
143                 goto cleanup;
144
145         /* re-map as R/O now */
146         map_mmaped = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
147         if (CHECK(map_mmaped == MAP_FAILED, "data_mmap",
148                   "data_map R/O mmap failed: %d\n", errno)) {
149                 map_mmaped = NULL;
150                 goto cleanup;
151         }
152         err = mprotect(map_mmaped, map_sz, PROT_WRITE);
153         if (CHECK(!err, "mprotect_wr", "mprotect() succeeded unexpectedly!\n"))
154                 goto cleanup;
155         err = mprotect(map_mmaped, map_sz, PROT_EXEC);
156         if (CHECK(!err, "mprotect_ex", "mprotect() succeeded unexpectedly!\n"))
157                 goto cleanup;
158         map_data = map_mmaped;
159
160         /* map/unmap in a loop to test ref counting */
161         for (i = 0; i < 10; i++) {
162                 int flags = i % 2 ? PROT_READ : PROT_WRITE;
163                 void *p;
164
165                 p = mmap(NULL, map_sz, flags, MAP_SHARED, data_map_fd, 0);
166                 if (CHECK_FAIL(p == MAP_FAILED))
167                         goto cleanup;
168                 err = munmap(p, map_sz);
169                 if (CHECK_FAIL(err))
170                         goto cleanup;
171         }
172
173         /* data_map freeze should now succeed due to no R/W mapping */
174         err = bpf_map_freeze(data_map_fd);
175         if (CHECK(err, "freeze", "data_map freeze failed: err=%d, errno=%d\n",
176                   err, errno))
177                 goto cleanup;
178
179         /* mapping as R/W now should fail */
180         tmp1 = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
181                     data_map_fd, 0);
182         if (CHECK(tmp1 != MAP_FAILED, "data_mmap", "mmap succeeded\n")) {
183                 munmap(tmp1, map_sz);
184                 goto cleanup;
185         }
186
187         bss_data->in_val = 321;
188         usleep(1);
189         CHECK_FAIL(bss_data->in_val != 321);
190         CHECK_FAIL(bss_data->out_val != 321);
191         CHECK_FAIL(skel->bss->in_val != 321);
192         CHECK_FAIL(skel->bss->out_val != 321);
193         CHECK_FAIL(map_data->val[0] != 111);
194         CHECK_FAIL(map_data->val[1] != 222);
195         CHECK_FAIL(map_data->val[2] != 321);
196         CHECK_FAIL(map_data->val[far] != 3 * 321);
197
198         /* check some more advanced mmap() manipulations */
199
200         tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS,
201                           -1, 0);
202         if (CHECK(tmp0 == MAP_FAILED, "adv_mmap0", "errno %d\n", errno))
203                 goto cleanup;
204
205         /* map all but last page: pages 1-3 mapped */
206         tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
207                           data_map_fd, 0);
208         if (CHECK(tmp0 != tmp1, "adv_mmap1", "tmp0: %p, tmp1: %p\n", tmp0, tmp1)) {
209                 munmap(tmp0, 4 * page_size);
210                 goto cleanup;
211         }
212
213         /* unmap second page: pages 1, 3 mapped */
214         err = munmap(tmp1 + page_size, page_size);
215         if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) {
216                 munmap(tmp1, 4 * page_size);
217                 goto cleanup;
218         }
219
220         /* map page 2 back */
221         tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ,
222                     MAP_SHARED | MAP_FIXED, data_map_fd, 0);
223         if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) {
224                 munmap(tmp1, page_size);
225                 munmap(tmp1 + 2*page_size, 2 * page_size);
226                 goto cleanup;
227         }
228         CHECK(tmp1 + page_size != tmp2, "adv_mmap4",
229               "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
230
231         /* re-map all 4 pages */
232         tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
233                     data_map_fd, 0);
234         if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) {
235                 munmap(tmp1, 4 * page_size); /* unmap page 1 */
236                 goto cleanup;
237         }
238         CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2);
239
240         map_data = tmp2;
241         CHECK_FAIL(bss_data->in_val != 321);
242         CHECK_FAIL(bss_data->out_val != 321);
243         CHECK_FAIL(skel->bss->in_val != 321);
244         CHECK_FAIL(skel->bss->out_val != 321);
245         CHECK_FAIL(map_data->val[0] != 111);
246         CHECK_FAIL(map_data->val[1] != 222);
247         CHECK_FAIL(map_data->val[2] != 321);
248         CHECK_FAIL(map_data->val[far] != 3 * 321);
249
250         munmap(tmp2, 4 * page_size);
251
252         /* map all 4 pages, but with pg_off=1 page, should fail */
253         tmp1 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
254                     data_map_fd, page_size /* initial page shift */);
255         if (CHECK(tmp1 != MAP_FAILED, "adv_mmap7", "unexpected success")) {
256                 munmap(tmp1, 4 * page_size);
257                 goto cleanup;
258         }
259
260         tmp1 = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
261         if (CHECK(tmp1 == MAP_FAILED, "last_mmap", "failed %d\n", errno))
262                 goto cleanup;
263
264         test_mmap__destroy(skel);
265         skel = NULL;
266         CHECK_FAIL(munmap(bss_mmaped, bss_sz));
267         bss_mmaped = NULL;
268         CHECK_FAIL(munmap(map_mmaped, map_sz));
269         map_mmaped = NULL;
270
271         /* map should be still held by active mmap */
272         tmp_fd = bpf_map_get_fd_by_id(data_map_id);
273         if (CHECK(tmp_fd < 0, "get_map_by_id", "failed %d\n", errno)) {
274                 munmap(tmp1, map_sz);
275                 goto cleanup;
276         }
277         close(tmp_fd);
278
279         /* this should release data map finally */
280         munmap(tmp1, map_sz);
281
282         /* we need to wait for RCU grace period */
283         for (i = 0; i < 10000; i++) {
284                 __u32 id = data_map_id - 1;
285                 if (bpf_map_get_next_id(id, &id) || id > data_map_id)
286                         break;
287                 usleep(1);
288         }
289
290         /* should fail to get map FD by non-existing ID */
291         tmp_fd = bpf_map_get_fd_by_id(data_map_id);
292         if (CHECK(tmp_fd >= 0, "get_map_by_id_after",
293                   "unexpectedly succeeded %d\n", tmp_fd)) {
294                 close(tmp_fd);
295                 goto cleanup;
296         }
297
298 cleanup:
299         if (bss_mmaped)
300                 CHECK_FAIL(munmap(bss_mmaped, bss_sz));
301         if (map_mmaped)
302                 CHECK_FAIL(munmap(map_mmaped, map_sz));
303         test_mmap__destroy(skel);
304 }