/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/rtnetlink.h>
#include <net/pkt_cls.h>

#include "netdevsim.h"

#define pr_vlog(env, fmt, ...) \
        bpf_verifier_log_write(env, "[netdevsim] " fmt, ##__VA_ARGS__)

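/*
 * Per-program state kept for each BPF program bound to a netdevsim device.
 * Created at verifier prep time and exposed under the shared device's
 * "bpf_bound_progs" debugfs directory.
 */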
struct nsim_bpf_bound_prog {
        struct netdevsim *ns;
        struct bpf_prog *prog;
        struct dentry *ddir;
        const char *state;
        bool is_loaded;
        struct list_head l;
};

#define NSIM_BPF_MAX_KEYS	2

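/*
 * Simulated offloaded map: a fixed array of NSIM_BPF_MAX_KEYS key/value
 * slots guarded by a mutex.  Empty slots have a NULL key pointer.
 */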
struct nsim_bpf_bound_map {
        struct netdevsim *ns;
        struct bpf_offloaded_map *map;
        struct mutex mutex;
        struct nsim_map_entry {
                void *key;
                void *value;
        } entry[NSIM_BPF_MAX_KEYS];
        struct list_head l;
};

static int nsim_debugfs_bpf_string_read(struct seq_file *file, void *data)
{
        const char **str = file->private;

        if (*str)
                seq_printf(file, "%s\n", *str);

        return 0;
}

static int nsim_debugfs_bpf_string_open(struct inode *inode, struct file *f)
{
        return single_open(f, nsim_debugfs_bpf_string_read, inode->i_private);
}

static const struct file_operations nsim_bpf_string_fops = {
        .owner = THIS_MODULE,
        .open = nsim_debugfs_bpf_string_open,
        .release = single_release,
        .read = seq_read,
        .llseek = seq_lseek
};

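/*
 * Per-instruction verifier callback.  Optionally sleeps on the first
 * instruction (bpf_bind_verifier_delay, settable via debugfs) to simulate
 * slow device verification, and writes a greeting into the verifier log
 * when the last instruction is reached.
 */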
static int
nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
{
        struct nsim_bpf_bound_prog *state;

        state = env->prog->aux->offload->dev_priv;
        if (state->ns->bpf_bind_verifier_delay && !insn_idx)
                msleep(state->ns->bpf_bind_verifier_delay);

        if (insn_idx == env->prog->len - 1)
                pr_vlog(env, "Hello from netdevsim!\n");

        return 0;
}

static int nsim_bpf_finalize(struct bpf_verifier_env *env)
{
        return 0;
}

static bool nsim_xdp_offload_active(struct netdevsim *ns)
{
        return ns->xdp_hw.prog;
}

static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
{
        struct nsim_bpf_bound_prog *state;

        if (!prog || !prog->aux->offload)
                return;

        state = prog->aux->offload->dev_priv;
        state->is_loaded = loaded;
}

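/*
 * Swap the program currently offloaded to the device.  @oldprog tells us
 * whether the caller expects an offload to already be active; warn if the
 * driver state disagrees.  Passing a NULL @prog removes the offload.
 */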
static int
nsim_bpf_offload(struct netdevsim *ns, struct bpf_prog *prog, bool oldprog)
{
        nsim_prog_set_loaded(ns->bpf_offloaded, false);

        WARN(!!ns->bpf_offloaded != oldprog,
             "bad offload state, expected offload %sto be active",
             oldprog ? "" : "not ");
        ns->bpf_offloaded = prog;
        ns->bpf_offloaded_id = prog ? prog->aux->id : 0;
        nsim_prog_set_loaded(prog, true);

        return 0;
}

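/*
 * TC block callback for cls_bpf offload.  Rejects anything netdevsim is
 * configured (via debugfs) not to accept, then hands the program over to
 * nsim_bpf_offload().  Only ETH_P_ALL filters on chain 0 are supported.
 */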
int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
                               void *type_data, void *cb_priv)
{
        struct tc_cls_bpf_offload *cls_bpf = type_data;
        struct bpf_prog *prog = cls_bpf->prog;
        struct netdevsim *ns = cb_priv;
        struct bpf_prog *oldprog;

        if (type != TC_SETUP_CLSBPF) {
                NSIM_EA(cls_bpf->common.extack,
                        "only offload of BPF classifiers supported");
                return -EOPNOTSUPP;
        }

        if (!tc_cls_can_offload_and_chain0(ns->netdev, &cls_bpf->common))
                return -EOPNOTSUPP;

        if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
                NSIM_EA(cls_bpf->common.extack,
                        "only ETH_P_ALL supported as filter protocol");
                return -EOPNOTSUPP;
        }

        if (!ns->bpf_tc_accept) {
                NSIM_EA(cls_bpf->common.extack,
                        "netdevsim configured to reject BPF TC offload");
                return -EOPNOTSUPP;
        }
        /* Note: progs without skip_sw will probably not be dev bound */
        if (prog && !prog->aux->offload && !ns->bpf_tc_non_bound_accept) {
                NSIM_EA(cls_bpf->common.extack,
                        "netdevsim configured to reject unbound programs");
                return -EOPNOTSUPP;
        }

        if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
                return -EOPNOTSUPP;

        oldprog = cls_bpf->oldprog;

        /* Don't remove if oldprog doesn't match driver's state */
        if (ns->bpf_offloaded != oldprog) {
                oldprog = NULL;
                if (!cls_bpf->prog)
                        return 0;
                if (ns->bpf_offloaded) {
                        NSIM_EA(cls_bpf->common.extack,
                                "driver and netdev offload states mismatch");
                        return -EBUSY;
                }
        }

        return nsim_bpf_offload(ns, cls_bpf->prog, oldprog);
}

int nsim_bpf_disable_tc(struct netdevsim *ns)
{
        if (ns->bpf_offloaded && !nsim_xdp_offload_active(ns))
                return -EBUSY;
        return 0;
}

static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
{
        if (!nsim_xdp_offload_active(ns) && !bpf->prog)
                return 0;
        if (!nsim_xdp_offload_active(ns) && bpf->prog && ns->bpf_offloaded) {
                NSIM_EA(bpf->extack, "TC program is already loaded");
                return -EBUSY;
        }

        return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns));
}

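/*
 * Common XDP attach path for both driver (XDP_SETUP_PROG) and HW offload
 * (XDP_SETUP_PROG_HW) mode, honouring the corresponding debugfs accept
 * knobs before recording the attachment.
 */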
static int
nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
                  struct xdp_attachment_info *xdp)
{
        int err;

        if (!xdp_attachment_flags_ok(xdp, bpf))
                return -EBUSY;

        if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
                NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
                return -EOPNOTSUPP;
        }
        if (bpf->command == XDP_SETUP_PROG_HW && !ns->bpf_xdpoffload_accept) {
                NSIM_EA(bpf->extack, "XDP offload disabled in DebugFS");
                return -EOPNOTSUPP;
        }

        if (bpf->command == XDP_SETUP_PROG_HW) {
                err = nsim_xdp_offload_prog(ns, bpf);
                if (err)
                        return err;
        }

        xdp_attachment_setup(xdp, bpf);

        return 0;
}

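/*
 * Allocate per-program bound state and its debugfs entries ("id", "state"
 * and "loaded").  The directory is named after a driver-local counter
 * because the program ID is not known yet at this point.
 */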
static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
{
        struct nsim_bpf_bound_prog *state;
        char name[16];

        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return -ENOMEM;

        state->ns = ns;
        state->prog = prog;
        state->state = "verify";

        /* Program id is not populated yet when we create the state. */
        sprintf(name, "%u", ns->sdev->prog_id_gen++);
        state->ddir = debugfs_create_dir(name, ns->sdev->ddir_bpf_bound_progs);
        if (IS_ERR_OR_NULL(state->ddir)) {
                kfree(state);
                return -ENOMEM;
        }

        debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
        debugfs_create_file("state", 0400, state->ddir,
                            &state->state, &nsim_bpf_string_fops);
        debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);

        list_add_tail(&state->l, &ns->sdev->bpf_bound_progs);

        prog->aux->offload->dev_priv = state;

        return 0;
}

static int nsim_bpf_verifier_prep(struct bpf_prog *prog)
{
        struct netdevsim *ns = netdev_priv(prog->aux->offload->netdev);

        if (!ns->bpf_bind_accept)
                return -EOPNOTSUPP;

        return nsim_bpf_create_prog(ns, prog);
}

static int nsim_bpf_translate(struct bpf_prog *prog)
{
        struct nsim_bpf_bound_prog *state = prog->aux->offload->dev_priv;

        state->state = "xlated";
        return 0;
}

static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
{
        struct nsim_bpf_bound_prog *state;

        state = prog->aux->offload->dev_priv;
        WARN(state->is_loaded,
             "offload state destroyed while program still bound");
        debugfs_remove_recursive(state->ddir);
        list_del(&state->l);
        kfree(state);
}

static const struct bpf_prog_offload_ops nsim_bpf_dev_ops = {
        .insn_hook = nsim_bpf_verify_insn,
        .finalize = nsim_bpf_finalize,
        .prepare = nsim_bpf_verifier_prep,
        .translate = nsim_bpf_translate,
        .destroy = nsim_bpf_destroy_prog,
};

static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{
        if (bpf->prog && bpf->prog->aux->offload) {
                NSIM_EA(bpf->extack, "attempt to load offloaded prog to drv");
                return -EINVAL;
        }
        if (ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
                NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
                return -EINVAL;
        }
        return 0;
}

static int
nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{
        struct nsim_bpf_bound_prog *state;

        if (!bpf->prog)
                return 0;

        if (!bpf->prog->aux->offload) {
                NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
                return -EINVAL;
        }
        if (!bpf_offload_dev_match(bpf->prog, ns->netdev)) {
                NSIM_EA(bpf->extack, "program bound to different dev");
                return -EINVAL;
        }

        state = bpf->prog->aux->offload->dev_priv;
        if (WARN_ON(strcmp(state->state, "xlated"))) {
                NSIM_EA(bpf->extack, "offloading program in bad state");
                return -EINVAL;
        }
        return 0;
}

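/*
 * Map element helpers.  Entries live in the fixed entry[] array; a slot is
 * in use when its key pointer is non-NULL.  Lookups are a linear scan over
 * the array, and all map operations are serialized by nmap->mutex.
 */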
static bool
nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key)
{
        return e->key && !memcmp(key, e->key, map->key_size);
}

static int nsim_map_key_find(struct bpf_offloaded_map *offmap, void *key)
{
        struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(nmap->entry); i++)
                if (nsim_map_key_match(&offmap->map, &nmap->entry[i], key))
                        return i;

        return -ENOENT;
}

static int
nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
{
        struct nsim_bpf_bound_map *nmap = offmap->dev_priv;

        nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
        if (!nmap->entry[idx].key)
                return -ENOMEM;
        nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
        if (!nmap->entry[idx].value) {
                kfree(nmap->entry[idx].key);
                nmap->entry[idx].key = NULL;
                return -ENOMEM;
        }

        return 0;
}

static int
nsim_map_get_next_key(struct bpf_offloaded_map *offmap,
                      void *key, void *next_key)
{
        struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
        int idx = -ENOENT;

        mutex_lock(&nmap->mutex);

        if (key)
                idx = nsim_map_key_find(offmap, key);
        if (idx == -ENOENT)
                idx = 0;
        else
                idx++;

        for (; idx < ARRAY_SIZE(nmap->entry); idx++) {
                if (nmap->entry[idx].key) {
                        memcpy(next_key, nmap->entry[idx].key,
                               offmap->map.key_size);
                        break;
                }
        }

        mutex_unlock(&nmap->mutex);

        if (idx == ARRAY_SIZE(nmap->entry))
                return -ENOENT;
        return 0;
}

static int
nsim_map_lookup_elem(struct bpf_offloaded_map *offmap, void *key, void *value)
{
        struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
        int idx;

        mutex_lock(&nmap->mutex);

        idx = nsim_map_key_find(offmap, key);
        if (idx >= 0)
                memcpy(value, nmap->entry[idx].value, offmap->map.value_size);

        mutex_unlock(&nmap->mutex);

        return idx < 0 ? idx : 0;
}

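/*
 * Update honours the standard map flags: BPF_EXIST requires the key to be
 * present already, BPF_NOEXIST requires it to be absent.  A new key takes
 * the first free slot, or fails with -E2BIG once all slots are taken.
 */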
static int
nsim_map_update_elem(struct bpf_offloaded_map *offmap,
                     void *key, void *value, u64 flags)
{
        struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
        int idx, err = 0;

        mutex_lock(&nmap->mutex);

        idx = nsim_map_key_find(offmap, key);
        if (idx < 0 && flags == BPF_EXIST) {
                err = idx;
                goto exit_unlock;
        }
        if (idx >= 0 && flags == BPF_NOEXIST) {
                err = -EEXIST;
                goto exit_unlock;
        }

        if (idx < 0) {
                for (idx = 0; idx < ARRAY_SIZE(nmap->entry); idx++)
                        if (!nmap->entry[idx].key)
                                break;
                if (idx == ARRAY_SIZE(nmap->entry)) {
                        err = -E2BIG;
                        goto exit_unlock;
                }

                err = nsim_map_alloc_elem(offmap, idx);
                if (err)
                        goto exit_unlock;
        }

        memcpy(nmap->entry[idx].key, key, offmap->map.key_size);
        memcpy(nmap->entry[idx].value, value, offmap->map.value_size);
exit_unlock:
        mutex_unlock(&nmap->mutex);

        return err;
}

static int nsim_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
        struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
        int idx;

        if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
                return -EINVAL;

        mutex_lock(&nmap->mutex);

        idx = nsim_map_key_find(offmap, key);
        if (idx >= 0) {
                kfree(nmap->entry[idx].key);
                kfree(nmap->entry[idx].value);
                memset(&nmap->entry[idx], 0, sizeof(nmap->entry[idx]));
        }

        mutex_unlock(&nmap->mutex);

        return idx < 0 ? idx : 0;
}

static const struct bpf_map_dev_ops nsim_bpf_map_ops = {
        .map_get_next_key = nsim_map_get_next_key,
        .map_lookup_elem = nsim_map_lookup_elem,
        .map_update_elem = nsim_map_update_elem,
        .map_delete_elem = nsim_map_delete_elem,
};

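/*
 * Bind an offloaded map to the device.  Only array and hash maps with no
 * extra flags and at most NSIM_BPF_MAX_KEYS entries are accepted.  Array
 * maps get all their elements allocated up front with keys 0..max-1.
 */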
static int
nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
{
        struct nsim_bpf_bound_map *nmap;
        int i, err;

        if (WARN_ON(offmap->map.map_type != BPF_MAP_TYPE_ARRAY &&
                    offmap->map.map_type != BPF_MAP_TYPE_HASH))
                return -EINVAL;
        if (offmap->map.max_entries > NSIM_BPF_MAX_KEYS)
                return -ENOMEM;
        if (offmap->map.map_flags)
                return -EINVAL;

        nmap = kzalloc(sizeof(*nmap), GFP_USER);
        if (!nmap)
                return -ENOMEM;

        offmap->dev_priv = nmap;
        nmap->ns = ns;
        nmap->map = offmap;
        mutex_init(&nmap->mutex);

        if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) {
                for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
                        u32 *key;

                        err = nsim_map_alloc_elem(offmap, i);
                        if (err)
                                goto err_free;
                        key = nmap->entry[i].key;
                        *key = i;
                }
        }

        offmap->dev_ops = &nsim_bpf_map_ops;
        list_add_tail(&nmap->l, &ns->sdev->bpf_bound_maps);

        return 0;

err_free:
        while (--i >= 0) {
                kfree(nmap->entry[i].key);
                kfree(nmap->entry[i].value);
        }
        kfree(nmap);
        return err;
}

static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap)
{
        struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
                kfree(nmap->entry[i].key);
                kfree(nmap->entry[i].value);
        }
        list_del_init(&nmap->l);
        mutex_destroy(&nmap->mutex);
        kfree(nmap);
}

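/*
 * ndo_bpf entry point: dispatch XDP queries and attachments as well as
 * offloaded map allocation/freeing.  Runs under RTNL.
 */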
int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
        struct netdevsim *ns = netdev_priv(dev);
        int err;

        ASSERT_RTNL();

        switch (bpf->command) {
        case XDP_QUERY_PROG:
                return xdp_attachment_query(&ns->xdp, bpf);
        case XDP_QUERY_PROG_HW:
                return xdp_attachment_query(&ns->xdp_hw, bpf);
        case XDP_SETUP_PROG:
                err = nsim_setup_prog_checks(ns, bpf);
                if (err)
                        return err;

                return nsim_xdp_set_prog(ns, bpf, &ns->xdp);
        case XDP_SETUP_PROG_HW:
                err = nsim_setup_prog_hw_checks(ns, bpf);
                if (err)
                        return err;

                return nsim_xdp_set_prog(ns, bpf, &ns->xdp_hw);
        case BPF_OFFLOAD_MAP_ALLOC:
                if (!ns->bpf_map_accept)
                        return -EOPNOTSUPP;

                return nsim_bpf_map_alloc(ns, bpf->offmap);
        case BPF_OFFLOAD_MAP_FREE:
                nsim_bpf_map_free(bpf->offmap);
                return 0;
        default:
                return -EINVAL;
        }
}

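/*
 * Per-netdev init.  The first port of a shared device (refcnt == 1) also
 * sets up the shared state: the bound prog/map lists, the "bpf_bound_progs"
 * debugfs directory and the BPF offload device.  All the bpf_* debugfs
 * knobs default to accepting programs.
 */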
int nsim_bpf_init(struct netdevsim *ns)
{
        int err;

        if (ns->sdev->refcnt == 1) {
                INIT_LIST_HEAD(&ns->sdev->bpf_bound_progs);
                INIT_LIST_HEAD(&ns->sdev->bpf_bound_maps);

                ns->sdev->ddir_bpf_bound_progs =
                        debugfs_create_dir("bpf_bound_progs", ns->sdev->ddir);
                if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs))
                        return -ENOMEM;

                ns->sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops);
                err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev);
                if (err)
                        return err;
        }

        err = bpf_offload_dev_netdev_register(ns->sdev->bpf_dev, ns->netdev);
        if (err)
                goto err_destroy_bdev;

        debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir,
                           &ns->bpf_offloaded_id);

        ns->bpf_bind_accept = true;
        debugfs_create_bool("bpf_bind_accept", 0600, ns->ddir,
                            &ns->bpf_bind_accept);
        debugfs_create_u32("bpf_bind_verifier_delay", 0600, ns->ddir,
                           &ns->bpf_bind_verifier_delay);

        ns->bpf_tc_accept = true;
        debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir,
                            &ns->bpf_tc_accept);
        debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ns->ddir,
                            &ns->bpf_tc_non_bound_accept);
        ns->bpf_xdpdrv_accept = true;
        debugfs_create_bool("bpf_xdpdrv_accept", 0600, ns->ddir,
                            &ns->bpf_xdpdrv_accept);
        ns->bpf_xdpoffload_accept = true;
        debugfs_create_bool("bpf_xdpoffload_accept", 0600, ns->ddir,
                            &ns->bpf_xdpoffload_accept);

        ns->bpf_map_accept = true;
        debugfs_create_bool("bpf_map_accept", 0600, ns->ddir,
                            &ns->bpf_map_accept);

        return 0;

err_destroy_bdev:
        if (ns->sdev->refcnt == 1)
                bpf_offload_dev_destroy(ns->sdev->bpf_dev);
        return err;
}

void nsim_bpf_uninit(struct netdevsim *ns)
{
        WARN_ON(ns->xdp.prog);
        WARN_ON(ns->xdp_hw.prog);
        WARN_ON(ns->bpf_offloaded);
        bpf_offload_dev_netdev_unregister(ns->sdev->bpf_dev, ns->netdev);

        if (ns->sdev->refcnt == 1) {
                WARN_ON(!list_empty(&ns->sdev->bpf_bound_progs));
                WARN_ON(!list_empty(&ns->sdev->bpf_bound_maps));
                bpf_offload_dev_destroy(ns->sdev->bpf_dev);
        }
}