// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

void cgroup_bpf_offline(struct cgroup *cgrp)
{
        cgroup_get(cgrp);
        percpu_ref_kill(&cgrp->bpf.refcnt);
}

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
        enum bpf_cgroup_storage_type stype;

        for_each_cgroup_storage_type(stype)
                bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
                                     struct bpf_cgroup_storage *new_storages[],
                                     enum bpf_attach_type type,
                                     struct bpf_prog *prog,
                                     struct cgroup *cgrp)
{
        enum bpf_cgroup_storage_type stype;
        struct bpf_cgroup_storage_key key;
        struct bpf_map *map;

        key.cgroup_inode_id = cgroup_id(cgrp);
        key.attach_type = type;

        for_each_cgroup_storage_type(stype) {
                map = prog->aux->cgroup_storage[stype];
                if (!map)
                        continue;

                storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
                if (storages[stype])
                        continue;

                storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
                if (IS_ERR(storages[stype])) {
                        bpf_cgroup_storages_free(new_storages);
                        return -ENOMEM;
                }

                new_storages[stype] = storages[stype];
        }

        return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
                                       struct bpf_cgroup_storage *src[])
{
        enum bpf_cgroup_storage_type stype;

        for_each_cgroup_storage_type(stype)
                dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
                                     struct cgroup *cgrp,
                                     enum bpf_attach_type attach_type)
{
        enum bpf_cgroup_storage_type stype;

        for_each_cgroup_storage_type(stype)
                bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}

/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
        cgroup_put(link->cgroup);
        link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
        struct cgroup *p, *cgrp = container_of(work, struct cgroup,
                                               bpf.release_work);
        struct bpf_prog_array *old_array;
        struct list_head *storages = &cgrp->bpf.storages;
        struct bpf_cgroup_storage *storage, *stmp;

        unsigned int type;

        mutex_lock(&cgroup_mutex);

        for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
                struct list_head *progs = &cgrp->bpf.progs[type];
                struct bpf_prog_list *pl, *pltmp;

                list_for_each_entry_safe(pl, pltmp, progs, node) {
                        list_del(&pl->node);
                        if (pl->prog)
                                bpf_prog_put(pl->prog);
                        if (pl->link)
                                bpf_cgroup_link_auto_detach(pl->link);
                        kfree(pl);
                        static_branch_dec(&cgroup_bpf_enabled_key);
                }
                old_array = rcu_dereference_protected(
                                cgrp->bpf.effective[type],
                                lockdep_is_held(&cgroup_mutex));
                bpf_prog_array_free(old_array);
        }

        list_for_each_entry_safe(storage, stmp, storages, list_cg) {
                bpf_cgroup_storage_unlink(storage);
                bpf_cgroup_storage_free(storage);
        }

        mutex_unlock(&cgroup_mutex);

        for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
                cgroup_bpf_put(p);

        percpu_ref_exit(&cgrp->bpf.refcnt);
        cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
        struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

        INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
        queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
 * link or direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
        if (pl->prog)
                return pl->prog;
        if (pl->link)
                return pl->link->link.prog;
        return NULL;
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
        struct bpf_prog_list *pl;
        u32 cnt = 0;

        list_for_each_entry(pl, head, node) {
                if (!prog_list_prog(pl))
                        continue;
                cnt++;
        }
        return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
                                    enum bpf_attach_type type)
{
        struct cgroup *p;

        p = cgroup_parent(cgrp);
        if (!p)
                return true;
        do {
                u32 flags = p->bpf.flags[type];
                u32 cnt;

                if (flags & BPF_F_ALLOW_MULTI)
                        return true;
                cnt = prog_list_length(&p->bpf.progs[type]);
                WARN_ON_ONCE(cnt > 1);
                if (cnt == 1)
                        return !!(flags & BPF_F_ALLOW_OVERRIDE);
                p = cgroup_parent(p);
        } while (p);
        return true;
}
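
/* E.g. if a parent cgroup has a program attached for some attach type
 * with neither BPF_F_ALLOW_OVERRIDE nor BPF_F_ALLOW_MULTI set, any
 * attach to its descendants for that type is rejected with -EPERM.
 */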

/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program yields
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
                                   enum bpf_attach_type type,
                                   struct bpf_prog_array **array)
{
        struct bpf_prog_array_item *item;
        struct bpf_prog_array *progs;
        struct bpf_prog_list *pl;
        struct cgroup *p = cgrp;
        int cnt = 0;

        /* count number of effective programs by walking parents */
        do {
                if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
                        cnt += prog_list_length(&p->bpf.progs[type]);
                p = cgroup_parent(p);
        } while (p);

        progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
        if (!progs)
                return -ENOMEM;

        /* populate the array with effective progs */
        cnt = 0;
        p = cgrp;
        do {
                if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
                        continue;

                list_for_each_entry(pl, &p->bpf.progs[type], node) {
                        if (!prog_list_prog(pl))
                                continue;

                        item = &progs->items[cnt];
                        item->prog = prog_list_prog(pl);
                        bpf_cgroup_storages_assign(item->cgroup_storage,
                                                   pl->storage);
                        cnt++;
                }
        } while ((p = cgroup_parent(p)));

        *array = progs;
        return 0;
}
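
/* E.g. with cgroup A (BPF_F_ALLOW_MULTI, prog P1) and its child A/B
 * (BPF_F_ALLOW_MULTI, progs P2 and P3), B's effective array for that
 * attach type is populated as [P2, P3, P1]: B's own programs first,
 * then those of its ancestors (names are illustrative).
 */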

static void activate_effective_progs(struct cgroup *cgrp,
                                     enum bpf_attach_type type,
                                     struct bpf_prog_array *old_array)
{
        old_array = rcu_replace_pointer(cgrp->bpf.effective[type], old_array,
                                        lockdep_is_held(&cgroup_mutex));
        /* free prog array after grace period, since __cgroup_bpf_run_*()
         * might be still walking the array
         */
        bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define NR ARRAY_SIZE(cgrp->bpf.effective)
        struct bpf_prog_array *arrays[NR] = {};
        struct cgroup *p;
        int ret, i;

        ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
                              GFP_KERNEL);
        if (ret)
                return ret;

        for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
                cgroup_bpf_get(p);

        for (i = 0; i < NR; i++)
                INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

        INIT_LIST_HEAD(&cgrp->bpf.storages);

        for (i = 0; i < NR; i++)
                if (compute_effective_progs(cgrp, i, &arrays[i]))
                        goto cleanup;

        for (i = 0; i < NR; i++)
                activate_effective_progs(cgrp, i, arrays[i]);

        return 0;
cleanup:
        for (i = 0; i < NR; i++)
                bpf_prog_array_free(arrays[i]);

        for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
                cgroup_bpf_put(p);

        percpu_ref_exit(&cgrp->bpf.refcnt);

        return -ENOMEM;
}

static int update_effective_progs(struct cgroup *cgrp,
                                  enum bpf_attach_type type)
{
        struct cgroup_subsys_state *css;
        int err;

        /* allocate and recompute effective prog arrays */
        css_for_each_descendant_pre(css, &cgrp->self) {
                struct cgroup *desc = container_of(css, struct cgroup, self);

                if (percpu_ref_is_zero(&desc->bpf.refcnt))
                        continue;

                err = compute_effective_progs(desc, type, &desc->bpf.inactive);
                if (err)
                        goto cleanup;
        }

        /* all allocations were successful. Activate all prog arrays */
        css_for_each_descendant_pre(css, &cgrp->self) {
                struct cgroup *desc = container_of(css, struct cgroup, self);

                if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
                        if (unlikely(desc->bpf.inactive)) {
                                bpf_prog_array_free(desc->bpf.inactive);
                                desc->bpf.inactive = NULL;
                        }
                        continue;
                }

                activate_effective_progs(desc, type, desc->bpf.inactive);
                desc->bpf.inactive = NULL;
        }

        return 0;

cleanup:
        /* oom while computing effective. Free all computed effective arrays
         * since they were not activated
         */
        css_for_each_descendant_pre(css, &cgrp->self) {
                struct cgroup *desc = container_of(css, struct cgroup, self);

                bpf_prog_array_free(desc->bpf.inactive);
                desc->bpf.inactive = NULL;
        }

        return err;
}

#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
                                               struct bpf_prog *prog,
                                               struct bpf_cgroup_link *link,
                                               struct bpf_prog *replace_prog,
                                               bool allow_multi)
{
        struct bpf_prog_list *pl;

        /* single-attach case */
        if (!allow_multi) {
                if (list_empty(progs))
                        return NULL;
                return list_first_entry(progs, typeof(*pl), node);
        }

        list_for_each_entry(pl, progs, node) {
                if (prog && pl->prog == prog && prog != replace_prog)
                        /* disallow attaching the same prog twice */
                        return ERR_PTR(-EINVAL);
                if (link && pl->link == link)
                        /* disallow attaching the same link twice */
                        return ERR_PTR(-EINVAL);
        }

        /* direct prog multi-attach w/ replacement case */
        if (replace_prog) {
                list_for_each_entry(pl, progs, node) {
                        if (pl->prog == replace_prog)
                                /* a match found */
                                return pl;
                }
                /* prog to replace not found for cgroup */
                return ERR_PTR(-ENOENT);
        }

        return NULL;
}

/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @link: A link to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp,
                        struct bpf_prog *prog, struct bpf_prog *replace_prog,
                        struct bpf_cgroup_link *link,
                        enum bpf_attach_type type, u32 flags)
{
        u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
        struct list_head *progs = &cgrp->bpf.progs[type];
        struct bpf_prog *old_prog = NULL;
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
        struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
        struct bpf_prog_list *pl;
        int err;

        if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
            ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
                /* invalid combination */
                return -EINVAL;
        if (link && (prog || replace_prog))
                /* only either link or prog/replace_prog can be specified */
                return -EINVAL;
        if (!!replace_prog != !!(flags & BPF_F_REPLACE))
                /* replace_prog implies BPF_F_REPLACE, and vice versa */
                return -EINVAL;

        if (!hierarchy_allows_attach(cgrp, type))
                return -EPERM;

        if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags)
                /* Disallow attaching non-overridable on top
                 * of existing overridable in this cgroup.
                 * Disallow attaching multi-prog if overridable or none
                 */
                return -EPERM;

        if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
                return -E2BIG;

        pl = find_attach_entry(progs, prog, link, replace_prog,
                               flags & BPF_F_ALLOW_MULTI);
        if (IS_ERR(pl))
                return PTR_ERR(pl);

        if (bpf_cgroup_storages_alloc(storage, new_storage, type,
                                      prog ? : link->link.prog, cgrp))
                return -ENOMEM;

        if (pl) {
                old_prog = pl->prog;
        } else {
                pl = kmalloc(sizeof(*pl), GFP_KERNEL);
                if (!pl) {
                        bpf_cgroup_storages_free(new_storage);
                        return -ENOMEM;
                }
                list_add_tail(&pl->node, progs);
        }

        pl->prog = prog;
        pl->link = link;
        bpf_cgroup_storages_assign(pl->storage, storage);
        cgrp->bpf.flags[type] = saved_flags;

        err = update_effective_progs(cgrp, type);
        if (err)
                goto cleanup;

        if (old_prog)
                bpf_prog_put(old_prog);
        else
                static_branch_inc(&cgroup_bpf_enabled_key);
        bpf_cgroup_storages_link(new_storage, cgrp, type);
        return 0;

cleanup:
        if (old_prog) {
                pl->prog = old_prog;
                pl->link = NULL;
        }
        bpf_cgroup_storages_free(new_storage);
        if (!old_prog) {
                list_del(&pl->node);
                kfree(pl);
        }
        return err;
}
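
/* A minimal userspace sketch of how this path is reached via bpf(2);
 * cgroup_fd and prog_fd are placeholder descriptors:
 *
 *      union bpf_attr attr = {};
 *
 *      attr.target_fd     = cgroup_fd; // open()ed cgroup2 directory
 *      attr.attach_bpf_fd = prog_fd;   // loaded cgroup BPF program
 *      attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *      attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *      err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */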

/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
                                   enum bpf_attach_type type,
                                   struct bpf_cgroup_link *link)
{
        struct bpf_prog_array_item *item;
        struct cgroup_subsys_state *css;
        struct bpf_prog_array *progs;
        struct bpf_prog_list *pl;
        struct list_head *head;
        struct cgroup *cg;
        int pos;

        css_for_each_descendant_pre(css, &cgrp->self) {
                struct cgroup *desc = container_of(css, struct cgroup, self);

                if (percpu_ref_is_zero(&desc->bpf.refcnt))
                        continue;

                /* find position of link in effective progs array */
                for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
                        if (pos && !(cg->bpf.flags[type] & BPF_F_ALLOW_MULTI))
                                continue;

                        head = &cg->bpf.progs[type];
                        list_for_each_entry(pl, head, node) {
                                if (!prog_list_prog(pl))
                                        continue;
                                if (pl->link == link)
                                        goto found;
                                pos++;
                        }
                }
found:
                BUG_ON(!cg);
                progs = rcu_dereference_protected(
                                desc->bpf.effective[type],
                                lockdep_is_held(&cgroup_mutex));
                item = &progs->items[pos];
                WRITE_ONCE(item->prog, link->link.prog);
        }
}

/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: A new program to install in place of the link's current one
 *
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_replace(struct cgroup *cgrp,
                                struct bpf_cgroup_link *link,
                                struct bpf_prog *new_prog)
{
        struct list_head *progs = &cgrp->bpf.progs[link->type];
        struct bpf_prog *old_prog;
        struct bpf_prog_list *pl;
        bool found = false;

        if (link->link.prog->type != new_prog->type)
                return -EINVAL;

        list_for_each_entry(pl, progs, node) {
                if (pl->link == link) {
                        found = true;
                        break;
                }
        }
        if (!found)
                return -ENOENT;

        old_prog = xchg(&link->link.prog, new_prog);
        replace_effective_prog(cgrp, link->type, link);
        bpf_prog_put(old_prog);
        return 0;
}

static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
                              struct bpf_prog *old_prog)
{
        struct bpf_cgroup_link *cg_link;
        int ret;

        cg_link = container_of(link, struct bpf_cgroup_link, link);

        mutex_lock(&cgroup_mutex);
        /* link might have been auto-released by dying cgroup, so fail */
        if (!cg_link->cgroup) {
                ret = -ENOLINK;
                goto out_unlock;
        }
        if (old_prog && link->prog != old_prog) {
                ret = -EPERM;
                goto out_unlock;
        }
        ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
        mutex_unlock(&cgroup_mutex);
        return ret;
}

static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
                                               struct bpf_prog *prog,
                                               struct bpf_cgroup_link *link,
                                               bool allow_multi)
{
        struct bpf_prog_list *pl;

        if (!allow_multi) {
                if (list_empty(progs))
                        /* report error when trying to detach and nothing is attached */
                        return ERR_PTR(-ENOENT);

                /* to maintain backward compatibility NONE and OVERRIDE cgroups
                 * allow detaching with invalid FD (prog==NULL) in legacy mode
                 */
                return list_first_entry(progs, typeof(*pl), node);
        }

        if (!prog && !link)
                /* to detach MULTI prog the user has to specify valid FD
                 * of the program or link to be detached
                 */
                return ERR_PTR(-EINVAL);

        /* find the prog or link and detach it */
        list_for_each_entry(pl, progs, node) {
                if (pl->prog == prog && pl->link == link)
                        return pl;
        }
        return ERR_PTR(-ENOENT);
}

/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                        struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
        struct list_head *progs = &cgrp->bpf.progs[type];
        u32 flags = cgrp->bpf.flags[type];
        struct bpf_prog_list *pl;
        struct bpf_prog *old_prog;
        int err;

        if (prog && link)
                /* only one of prog or link can be specified */
                return -EINVAL;

        pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
        if (IS_ERR(pl))
                return PTR_ERR(pl);

        /* mark it deleted, so it's ignored while recomputing effective */
        old_prog = pl->prog;
        pl->prog = NULL;
        pl->link = NULL;

        err = update_effective_progs(cgrp, type);
        if (err)
                goto cleanup;

        /* now can actually delete it from this cgroup list */
        list_del(&pl->node);
        kfree(pl);
        if (list_empty(progs))
                /* last program was detached, reset flags to zero */
                cgrp->bpf.flags[type] = 0;
        if (old_prog)
                bpf_prog_put(old_prog);
        static_branch_dec(&cgroup_bpf_enabled_key);
        return 0;

cleanup:
        /* restore back prog or link */
        pl->prog = old_prog;
        pl->link = link;
        return err;
}

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                       union bpf_attr __user *uattr)
{
        __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
        enum bpf_attach_type type = attr->query.attach_type;
        struct list_head *progs = &cgrp->bpf.progs[type];
        u32 flags = cgrp->bpf.flags[type];
        struct bpf_prog_array *effective;
        struct bpf_prog *prog;
        int cnt, ret = 0, i;

        effective = rcu_dereference_protected(cgrp->bpf.effective[type],
                                              lockdep_is_held(&cgroup_mutex));

        if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
                cnt = bpf_prog_array_length(effective);
        else
                cnt = prog_list_length(progs);

        if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
                return -EFAULT;
        if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
                return -EFAULT;
        if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
                /* return early if user requested only program count + flags */
                return 0;
        if (attr->query.prog_cnt < cnt) {
                cnt = attr->query.prog_cnt;
                ret = -ENOSPC;
        }

        if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
                return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
        } else {
                struct bpf_prog_list *pl;
                u32 id;

                i = 0;
                list_for_each_entry(pl, progs, node) {
                        prog = prog_list_prog(pl);
                        id = prog->aux->id;
                        if (copy_to_user(prog_ids + i, &id, sizeof(id)))
                                return -EFAULT;
                        if (++i == cnt)
                                break;
                }
        }
        return ret;
}

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype, struct bpf_prog *prog)
{
        struct bpf_prog *replace_prog = NULL;
        struct cgroup *cgrp;
        int ret;

        cgrp = cgroup_get_from_fd(attr->target_fd);
        if (IS_ERR(cgrp))
                return PTR_ERR(cgrp);

        if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
            (attr->attach_flags & BPF_F_REPLACE)) {
                replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
                if (IS_ERR(replace_prog)) {
                        cgroup_put(cgrp);
                        return PTR_ERR(replace_prog);
                }
        }

        ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
                                attr->attach_type, attr->attach_flags);

        if (replace_prog)
                bpf_prog_put(replace_prog);
        cgroup_put(cgrp);
        return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
        struct bpf_prog *prog;
        struct cgroup *cgrp;
        int ret;

        cgrp = cgroup_get_from_fd(attr->target_fd);
        if (IS_ERR(cgrp))
                return PTR_ERR(cgrp);

        prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
        if (IS_ERR(prog))
                prog = NULL;

        ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
        if (prog)
                bpf_prog_put(prog);

        cgroup_put(cgrp);
        return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
        struct bpf_cgroup_link *cg_link =
                container_of(link, struct bpf_cgroup_link, link);
        struct cgroup *cg;

        /* link might have been auto-detached by dying cgroup already,
         * in that case our work is done here
         */
        if (!cg_link->cgroup)
                return;

        mutex_lock(&cgroup_mutex);

        /* re-check cgroup under lock again */
        if (!cg_link->cgroup) {
                mutex_unlock(&cgroup_mutex);
                return;
        }

        WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
                                    cg_link->type));

        cg = cg_link->cgroup;
        cg_link->cgroup = NULL;

        mutex_unlock(&cgroup_mutex);

        cgroup_put(cg);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
        struct bpf_cgroup_link *cg_link =
                container_of(link, struct bpf_cgroup_link, link);

        kfree(cg_link);
}

static int bpf_cgroup_link_detach(struct bpf_link *link)
{
        bpf_cgroup_link_release(link);

        return 0;
}

static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
                                        struct seq_file *seq)
{
        struct bpf_cgroup_link *cg_link =
                container_of(link, struct bpf_cgroup_link, link);
        u64 cg_id = 0;

        mutex_lock(&cgroup_mutex);
        if (cg_link->cgroup)
                cg_id = cgroup_id(cg_link->cgroup);
        mutex_unlock(&cgroup_mutex);

        seq_printf(seq,
                   "cgroup_id:\t%llu\n"
                   "attach_type:\t%d\n",
                   cg_id,
                   cg_link->type);
}

static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
                                          struct bpf_link_info *info)
{
        struct bpf_cgroup_link *cg_link =
                container_of(link, struct bpf_cgroup_link, link);
        u64 cg_id = 0;

        mutex_lock(&cgroup_mutex);
        if (cg_link->cgroup)
                cg_id = cgroup_id(cg_link->cgroup);
        mutex_unlock(&cgroup_mutex);

        info->cgroup.cgroup_id = cg_id;
        info->cgroup.attach_type = cg_link->type;
        return 0;
}

static const struct bpf_link_ops bpf_cgroup_link_lops = {
        .release = bpf_cgroup_link_release,
        .dealloc = bpf_cgroup_link_dealloc,
        .detach = bpf_cgroup_link_detach,
        .update_prog = cgroup_bpf_replace,
        .show_fdinfo = bpf_cgroup_link_show_fdinfo,
        .fill_link_info = bpf_cgroup_link_fill_link_info,
};

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
        struct bpf_link_primer link_primer;
        struct bpf_cgroup_link *link;
        struct cgroup *cgrp;
        int err;

        if (attr->link_create.flags)
                return -EINVAL;

        cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
        if (IS_ERR(cgrp))
                return PTR_ERR(cgrp);

        link = kzalloc(sizeof(*link), GFP_USER);
        if (!link) {
                err = -ENOMEM;
                goto out_put_cgroup;
        }
        bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
                      prog);
        link->cgroup = cgrp;
        link->type = attr->link_create.attach_type;

        err = bpf_link_prime(&link->link, &link_primer);
        if (err) {
                kfree(link);
                goto out_put_cgroup;
        }

        err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type,
                                BPF_F_ALLOW_MULTI);
        if (err) {
                bpf_link_cleanup(&link_primer);
                goto out_put_cgroup;
        }

        return bpf_link_settle(&link_primer);

out_put_cgroup:
        cgroup_put(cgrp);
        return err;
}
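
/* Userspace sketch (placeholder fds): a link-based attachment stays in
 * place until the returned link FD is closed or the cgroup dies:
 *
 *      union bpf_attr attr = {};
 *
 *      attr.link_create.prog_fd     = prog_fd;
 *      attr.link_create.target_fd   = cgroup_fd;
 *      attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;
 *      link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 */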

int cgroup_bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr)
{
        struct cgroup *cgrp;
        int ret;

        cgrp = cgroup_get_from_fd(attr->query.target_fd);
        if (IS_ERR(cgrp))
                return PTR_ERR(cgrp);

        ret = cgroup_bpf_query(cgrp, attr, uattr);

        cgroup_put(cgrp);
        return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS    (0)    - continue with packet output
 *   NET_XMIT_DROP       (1)    - drop packet and notify TCP to call cwr
 *   NET_XMIT_CN         (2)    - continue with packet output and notify TCP
 *                                to call cwr
 *   -EPERM                     - drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum bpf_attach_type type)
{
        unsigned int offset = skb->data - skb_network_header(skb);
        struct sock *save_sk;
        void *saved_data_end;
        struct cgroup *cgrp;
        int ret;

        if (!sk || !sk_fullsock(sk))
                return 0;

        if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
                return 0;

        cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
        save_sk = skb->sk;
        skb->sk = sk;
        __skb_push(skb, offset);

        /* compute pointers for the bpf prog */
        bpf_compute_and_save_data_end(skb, &saved_data_end);

        if (type == BPF_CGROUP_INET_EGRESS) {
                ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
                        cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
        } else {
                ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
                                         __bpf_prog_run_save_cb);
                ret = (ret == 1 ? 0 : -EPERM);
        }
        bpf_restore_data_end(skb, saved_data_end);
        __skb_pull(skb, offset);
        skb->sk = save_sk;

        return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
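
/* A minimal sketch of a program this hook would run; return codes map
 * to the values documented above:
 *
 *      SEC("cgroup_skb/egress")
 *      int cg_egress(struct __sk_buff *skb)
 *      {
 *              return 1;       // 1 = allow; 0 becomes -EPERM (drop)
 *      }
 */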

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
                               enum bpf_attach_type type)
{
        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
        int ret;

        ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
        return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       a sockaddr provided by user space
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
                                      struct sockaddr *uaddr,
                                      enum bpf_attach_type type,
                                      void *t_ctx)
{
        struct bpf_sock_addr_kern ctx = {
                .sk = sk,
                .uaddr = uaddr,
                .t_ctx = t_ctx,
        };
        struct sockaddr_storage unspec;
        struct cgroup *cgrp;
        int ret;

        /* Check socket family since not all sockets represent network
         * endpoint (e.g. AF_UNIX).
         */
        if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
                return 0;

        if (!ctx.uaddr) {
                memset(&unspec, 0, sizeof(unspec));
                ctx.uaddr = (struct sockaddr *)&unspec;
        }

        cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
        ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

        return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
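
/* Sketch of a BPF_CGROUP_INET4_BIND program run through this hook
 * (illustrative only):
 *
 *      SEC("cgroup/bind4")
 *      int bind_v4(struct bpf_sock_addr *ctx)
 *      {
 *              // deny binds to the privileged port range
 *              if (bpf_ntohs(ctx->user_port) < 1024)
 *                      return 0;       // caller sees -EPERM
 *              return 1;
 *      }
 */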

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.). May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                     struct bpf_sock_ops_kern *sock_ops,
                                     enum bpf_attach_type type)
{
        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
        int ret;

        ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
                                 BPF_PROG_RUN);
        return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                      short access, enum bpf_attach_type type)
{
        struct cgroup *cgrp;
        struct bpf_cgroup_dev_ctx ctx = {
                .access_type = (access << 16) | dev_type,
                .major = major,
                .minor = minor,
        };
        int allow = 1;

        rcu_read_lock();
        cgrp = task_dfl_cgroup(current);
        allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
                                   BPF_PROG_RUN);
        rcu_read_unlock();

        return !allow;
}
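
/* Illustrative BPF_CGROUP_DEVICE program matching the ctx encoding
 * above (access type in the high 16 bits, device type in the low 16):
 *
 *      SEC("cgroup/dev")
 *      int dev_filter(struct bpf_cgroup_dev_ctx *ctx)
 *      {
 *              // allow only the 1:3 character device (/dev/null)
 *              if ((ctx->access_type & 0xffff) == BPF_DEVCG_DEV_CHAR &&
 *                  ctx->major == 1 && ctx->minor == 3)
 *                      return 1;
 *              return 0;
 *      }
 */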

static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_get_current_uid_gid:
                return &bpf_get_current_uid_gid_proto;
        case BPF_FUNC_get_local_storage:
                return &bpf_get_local_storage_proto;
        case BPF_FUNC_get_current_cgroup_id:
                return &bpf_get_current_cgroup_id_proto;
        case BPF_FUNC_perf_event_output:
                return &bpf_event_output_data_proto;
        default:
                return bpf_base_func_proto(func_id);
        }
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
                                       enum bpf_access_type type,
                                       const struct bpf_prog *prog,
                                       struct bpf_insn_access_aux *info)
{
        const int size_default = sizeof(__u32);

        if (type == BPF_WRITE)
                return false;

        if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
                return false;
        /* The verifier guarantees that size > 0. */
        if (off % size != 0)
                return false;

        switch (off) {
        case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
                bpf_ctx_record_field_size(info, size_default);
                if (!bpf_ctx_narrow_access_ok(off, size, size_default))
                        return false;
                break;
        default:
                if (size != size_default)
                        return false;
        }

        return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
        .get_func_proto         = cgroup_dev_func_proto,
        .is_valid_access        = cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl() - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer (in and out)
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *      result is size of the new value if the program set one, initial value
 *      otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *      to sysctl is happening, result is new position if program overrode it,
 *      initial value otherwise
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
                                   struct ctl_table *table, int write,
                                   char **buf, size_t *pcount, loff_t *ppos,
                                   enum bpf_attach_type type)
{
        struct bpf_sysctl_kern ctx = {
                .head = head,
                .table = table,
                .write = write,
                .ppos = ppos,
                .cur_val = NULL,
                .cur_len = PAGE_SIZE,
                .new_val = NULL,
                .new_len = 0,
                .new_updated = 0,
        };
        struct cgroup *cgrp;
        loff_t pos = 0;
        int ret;

        ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
        if (!ctx.cur_val ||
            table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
                /* Let BPF program decide how to proceed. */
                ctx.cur_len = 0;
        }

        if (write && *buf && *pcount) {
                /* BPF program should be able to override new value with a
                 * buffer bigger than provided by user.
                 */
                ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
                ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
                if (ctx.new_val) {
                        memcpy(ctx.new_val, *buf, ctx.new_len);
                } else {
                        /* Let BPF program decide how to proceed. */
                        ctx.new_len = 0;
                }
        }

        rcu_read_lock();
        cgrp = task_dfl_cgroup(current);
        ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
        rcu_read_unlock();

        kfree(ctx.cur_val);

        if (ret == 1 && ctx.new_updated) {
                kfree(*buf);
                *buf = ctx.new_val;
                *pcount = ctx.new_len;
        } else {
                kfree(ctx.new_val);
        }

        return ret == 1 ? 0 : -EPERM;
}
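
/* Sketch of a BPF_CGROUP_SYSCTL program run by this hook; returning 0
 * turns the access into -EPERM as described above:
 *
 *      SEC("cgroup/sysctl")
 *      int sysctl_read_only(struct bpf_sysctl *ctx)
 *      {
 *              return ctx->write ? 0 : 1;      // deny writes, allow reads
 *      }
 */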

#ifdef CONFIG_NET
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
                                             enum bpf_attach_type attach_type)
{
        struct bpf_prog_array *prog_array;
        bool empty;

        rcu_read_lock();
        prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
        empty = bpf_prog_array_is_empty(prog_array);
        rcu_read_unlock();

        return empty;
}

static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
{
        if (unlikely(max_optlen < 0))
                return -EINVAL;

        if (unlikely(max_optlen > PAGE_SIZE)) {
                /* We don't expose optvals that are greater than PAGE_SIZE
                 * to the BPF program.
                 */
                max_optlen = PAGE_SIZE;
        }

        ctx->optval = kzalloc(max_optlen, GFP_USER);
        if (!ctx->optval)
                return -ENOMEM;

        ctx->optval_end = ctx->optval + max_optlen;

        return max_optlen;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
{
        kfree(ctx->optval);
}

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
                                       int *optname, char __user *optval,
                                       int *optlen, char **kernel_optval)
{
        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
        struct bpf_sockopt_kern ctx = {
                .sk = sk,
                .level = *level,
                .optname = *optname,
        };
        int ret, max_optlen;

        /* Opportunistic check to see whether we have any BPF program
         * attached to the hook so we don't waste time allocating
         * memory and locking the socket.
         */
        if (!cgroup_bpf_enabled ||
            __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
                return 0;

        /* Allocate a bit more than the initial user buffer for
         * BPF program. The canonical use case is overriding
         * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
         */
        max_optlen = max_t(int, 16, *optlen);

        max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
        if (max_optlen < 0)
                return max_optlen;

        ctx.optlen = *optlen;

        if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
                ret = -EFAULT;
                goto out;
        }

        lock_sock(sk);
        ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
                                 &ctx, BPF_PROG_RUN);
        release_sock(sk);

        if (!ret) {
                ret = -EPERM;
                goto out;
        }

        if (ctx.optlen == -1) {
                /* optlen set to -1, bypass kernel */
                ret = 1;
        } else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
                /* optlen is out of bounds */
                ret = -EFAULT;
        } else {
                /* optlen within bounds, run kernel handler */
                ret = 0;

                /* export any potential modifications */
                *level = ctx.level;
                *optname = ctx.optname;

                /* optlen == 0 from BPF indicates that we should
                 * use original userspace data.
                 */
                if (ctx.optlen != 0) {
                        *optlen = ctx.optlen;
                        *kernel_optval = ctx.optval;
                }
        }

out:
        if (ret)
                sockopt_free_buf(&ctx);
        return ret;
}
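
/* Sketch of a setsockopt program using the ctx->optlen contract above
 * (illustrative only):
 *
 *      SEC("cgroup/setsockopt")
 *      int deny_ip_options(struct bpf_sockopt *ctx)
 *      {
 *              if (ctx->level == SOL_IP && ctx->optname == IP_OPTIONS)
 *                      return 0;       // caller gets -EPERM
 *              return 1;               // kernel handler runs as usual
 *      }
 */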

int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                                       int optname, char __user *optval,
                                       int __user *optlen, int max_optlen,
                                       int retval)
{
        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
        struct bpf_sockopt_kern ctx = {
                .sk = sk,
                .level = level,
                .optname = optname,
                .retval = retval,
        };
        int ret;

        /* Opportunistic check to see whether we have any BPF program
         * attached to the hook so we don't waste time allocating
         * memory and locking the socket.
         */
        if (!cgroup_bpf_enabled ||
            __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
                return retval;

        ctx.optlen = max_optlen;

        max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
        if (max_optlen < 0)
                return max_optlen;

        if (!retval) {
                /* If kernel getsockopt finished successfully,
                 * copy whatever was returned to the user back
                 * into our temporary buffer. Set optlen to the
                 * one that kernel returned as well to let
                 * BPF programs inspect the value.
                 */

                if (get_user(ctx.optlen, optlen)) {
                        ret = -EFAULT;
                        goto out;
                }

                if (copy_from_user(ctx.optval, optval,
                                   min(ctx.optlen, max_optlen)) != 0) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        lock_sock(sk);
        ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
                                 &ctx, BPF_PROG_RUN);
        release_sock(sk);

        if (!ret) {
                ret = -EPERM;
                goto out;
        }

        if (ctx.optlen > max_optlen) {
                ret = -EFAULT;
                goto out;
        }

        /* BPF programs only allowed to set retval to 0, not some
         * arbitrary value.
         */
        if (ctx.retval != 0 && ctx.retval != retval) {
                ret = -EFAULT;
                goto out;
        }

        if (ctx.optlen != 0) {
                if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
                    put_user(ctx.optlen, optlen)) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        ret = ctx.retval;

out:
        sockopt_free_buf(&ctx);
        return ret;
}
#endif
0d01da6a | 1489 | |
808649fb AI |
1490 | static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp, |
1491 | size_t *lenp) | |
1492 | { | |
1493 | ssize_t tmp_ret = 0, ret; | |
1494 | ||
1495 | if (dir->header.parent) { | |
1496 | tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp); | |
1497 | if (tmp_ret < 0) | |
1498 | return tmp_ret; | |
1499 | } | |
1500 | ||
1501 | ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp); | |
1502 | if (ret < 0) | |
1503 | return ret; | |
1504 | *bufp += ret; | |
1505 | *lenp -= ret; | |
1506 | ret += tmp_ret; | |
1507 | ||
1508 | /* Avoid leading slash. */ | |
1509 | if (!ret) | |
1510 | return ret; | |
1511 | ||
1512 | tmp_ret = strscpy(*bufp, "/", *lenp); | |
1513 | if (tmp_ret < 0) | |
1514 | return tmp_ret; | |
1515 | *bufp += tmp_ret; | |
1516 | *lenp -= tmp_ret; | |
1517 | ||
1518 | return ret + tmp_ret; | |
1519 | } | |
1520 | ||
1521 | BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf, | |
1522 | size_t, buf_len, u64, flags) | |
1523 | { | |
1524 | ssize_t tmp_ret = 0, ret; | |
1525 | ||
1526 | if (!buf) | |
1527 | return -EINVAL; | |
1528 | ||
1529 | if (!(flags & BPF_F_SYSCTL_BASE_NAME)) { | |
1530 | if (!ctx->head) | |
1531 | return -EINVAL; | |
1532 | tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len); | |
1533 | if (tmp_ret < 0) | |
1534 | return tmp_ret; | |
1535 | } | |
1536 | ||
1537 | ret = strscpy(buf, ctx->table->procname, buf_len); | |
1538 | ||
1539 | return ret < 0 ? ret : tmp_ret + ret; | |
1540 | } | |
1541 | ||
1542 | static const struct bpf_func_proto bpf_sysctl_get_name_proto = { | |
1543 | .func = bpf_sysctl_get_name, | |
1544 | .gpl_only = false, | |
1545 | .ret_type = RET_INTEGER, | |
1546 | .arg1_type = ARG_PTR_TO_CTX, | |
1547 | .arg2_type = ARG_PTR_TO_MEM, | |
1548 | .arg3_type = ARG_CONST_SIZE, | |
1549 | .arg4_type = ARG_ANYTHING, | |
1550 | }; | |

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}
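
/* To make the three cases above concrete: copying the 3-byte value
 * "42\n" into an 8-byte dst copies it, zero-fills the remaining five
 * bytes and returns 3; into a 2-byte dst only "4" plus the forced
 * terminating NUL fit, so -E2BIG is returned; with no source value at
 * all, dst is zeroed and -EINVAL is returned.
 */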

BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
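
/* Illustrative sketch (not part of this file): on a sysctl write, a
 * program may substitute the value being written via the helper above
 * (which caps the new value at PAGE_SIZE - 1 bytes). The name and the
 * clamping policy are hypothetical.
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_clamp(struct bpf_sysctl *ctx)
 *	{
 *		char clamped[] = "100";
 *
 *		if (ctx->write)
 *			bpf_sysctl_set_new_value(ctx, clamped,
 *						 sizeof(clamped) - 1);
 *		return 1;	// allow the (possibly rewritten) write
 *	}
 */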

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}
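
/* In short: programs may read ctx->write (narrow loads are fine) and
 * may read or write ctx->file_pos, but a file_pos store must be a full
 * 4-byte access; every other offset in struct bpf_sysctl is rejected,
 * and the sysctl name and values are reached through helpers instead.
 */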

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	u32 read_size;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       sizeof_field(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer, so it has to be accessed via indirect
		 * loads and stores. Also, for stores an additional temporary
		 * register is used, since neither src_reg nor dst_reg may be
		 * clobbered.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg,
				bpf_ctx_narrow_access_offset(
					0, sizeof(u32), sizeof(loff_t)));
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
				bpf_ctx_narrow_access_offset(
					0, read_size, sizeof(loff_t)));
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}
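
/* For reference, the BPF_WRITE branch above emits, in pseudo-asm (treg
 * is the scratch register chosen to collide with neither src_reg nor
 * dst_reg):
 *
 *	*(u64 *)(dst_reg + off(tmp_reg)) = treg	// spill scratch reg
 *	treg = *(u64 *)(dst_reg + off(ppos))	// load the ppos pointer
 *	*(u32 *)(treg + low32_off) = src_reg	// store the new file_pos
 *	treg = *(u64 *)(dst_reg + off(tmp_reg))	// restore scratch reg
 *
 * where low32_off is the endianness-aware offset of the low 32 bits
 * inside the 64-bit loff_t.
 */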

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};

static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			fallthrough;
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}
	return true;
}
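
/* Write access is therefore asymmetric by attach type: retval is
 * writable only from getsockopt programs, level and optname only from
 * setsockopt programs, and optlen from both. optval and optval_end are
 * typed as packet pointers, so the verifier demands the same
 * "optval + N <= optval_end" style bounds check it requires for skb
 * data before any direct access to the option payload.
 */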

#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
	  si->dst_reg, si->src_reg,					\
	  offsetof(struct bpf_sockopt_kern, F))
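
/* For example, CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level) expands to
 *
 *	BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, level),
 *		    si->dst_reg, si->src_reg,
 *		    offsetof(struct bpf_sockopt_kern, level))
 *
 * i.e. a correctly sized load of the kernel-side field, relying on
 * "si" being in scope at each expansion site below.
 */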

static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
		break;
	}

	return insn - insn_buf;
}

static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
				   bool direct_write,
				   const struct bpf_prog *prog)
{
	/* Nothing to do for the sockopt argument: the data is kzalloc'ed,
	 * so direct writes are always safe and no prologue is needed.
	 */
	return 0;
}

const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
	.get_func_proto		= cg_sockopt_func_proto,
	.is_valid_access	= cg_sockopt_is_valid_access,
	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
	.gen_prologue		= cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};