// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_experimental.h"
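
/* A tree node: the embedded bpf_rb_node links this struct into a
 * bpf_rb_root, and the __contains(node_data, node) annotation on each root
 * below tells the verifier which struct/field pair that root holds.
 */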
struct node_data {
        long key;
        long data;
        struct bpf_rb_node node;
};

struct root_nested_inner {
        struct bpf_spin_lock glock;
        struct bpf_rb_root root __contains(node_data, node);
};

struct root_nested {
        struct root_nested_inner inner;
};
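
/* Result slots read back after each program run (assumption: a matching
 * userspace prog_tests runner checks these values).
 */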
long less_callback_ran = -1;
long removed_key = -1;
long first_data[2] = {-1, -1};

#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);
private(A) struct bpf_rb_root groot_array[2] __contains(node_data, node);
private(A) struct bpf_rb_root groot_array_one[1] __contains(node_data, node);
private(B) struct root_nested groot_nested;
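
/* Comparator passed to bpf_rbtree_add(): orders nodes by ascending key.
 * Setting less_callback_ran lets userspace confirm the callback actually ran.
 */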
static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
        struct node_data *node_a;
        struct node_data *node_b;

        node_a = container_of(a, struct node_data, node);
        node_b = container_of(b, struct node_data, node);
        less_callback_ran = 1;

        return node_a->key < node_b->key;
}
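
/* Allocate three nodes and add them to the given tree, taking the matching
 * lock around each bpf_rbtree_add() since the verifier requires the root's
 * associated bpf_spin_lock to be held for every tree operation.
 */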
static long __add_three(struct bpf_rb_root *root, struct bpf_spin_lock *lock)
{
        struct node_data *n, *m;

        n = bpf_obj_new(typeof(*n));
        if (!n)
                return 1;
        n->key = 5;

        m = bpf_obj_new(typeof(*m));
        if (!m) {
                bpf_obj_drop(n);
                return 2;
        }
        m->key = 1;

        bpf_spin_lock(lock);
        bpf_rbtree_add(root, &n->node, less);
        bpf_rbtree_add(root, &m->node, less);
        bpf_spin_unlock(lock);

        n = bpf_obj_new(typeof(*n));
        if (!n)
                return 3;
        n->key = 3;

        bpf_spin_lock(lock);
        bpf_rbtree_add(root, &n->node, less);
        bpf_spin_unlock(lock);
        return 0;
}
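
/* The SEC("tc") programs below are meant to be run one at a time by the
 * userspace harness (e.g. via the test-run facility); a nonzero return value
 * reports which step failed.
 */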
SEC("tc")
long rbtree_add_nodes(void *ctx)
{
        return __add_three(&groot, &glock);
}

SEC("tc")
long rbtree_add_nodes_nested(void *ctx)
{
        return __add_three(&groot_nested.inner.root, &groot_nested.inner.glock);
}
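
/* Add two nodes, remove one while still holding the lock, and report the
 * removed node's key through removed_key. bpf_rbtree_remove() returns an
 * owning reference, so the removed node must be freed with bpf_obj_drop().
 */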
SEC("tc")
long rbtree_add_and_remove(void *ctx)
{
        struct bpf_rb_node *res = NULL;
        struct node_data *n, *m = NULL;

        n = bpf_obj_new(typeof(*n));
        if (!n)
                return 1;
        n->key = 5;

        m = bpf_obj_new(typeof(*m));
        if (!m) {
                bpf_obj_drop(n);
                return 1;
        }
        m->key = 3;

        bpf_spin_lock(&glock);
        bpf_rbtree_add(&groot, &n->node, less);
        bpf_rbtree_add(&groot, &m->node, less);
        res = bpf_rbtree_remove(&groot, &n->node);
        bpf_spin_unlock(&glock);

        if (!res)
                return 1;

        n = container_of(res, struct node_data, node);
        removed_key = n->key;
        bpf_obj_drop(n);
        return 0;
}
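
/* Same add/remove flow, but spread across bpf_rb_root fields declared as
 * arrays: a two-element array of roots and a single-element array, all
 * protected by the shared glock in the same private(A) section.
 */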
SEC("tc")
long rbtree_add_and_remove_array(void *ctx)
{
        struct bpf_rb_node *res1 = NULL, *res2 = NULL, *res3 = NULL;
        struct node_data *nodes[3][2] = {{NULL, NULL}, {NULL, NULL}, {NULL, NULL}};
        struct node_data *n;
        long k1 = -1, k2 = -1, k3 = -1;
        int i, j;

        for (i = 0; i < 3; i++) {
                for (j = 0; j < 2; j++) {
                        nodes[i][j] = bpf_obj_new(typeof(*nodes[i][j]));
                        if (!nodes[i][j])
                                goto err_out;
                        nodes[i][j]->key = i * 2 + j;
                }
        }

        bpf_spin_lock(&glock);
        for (i = 0; i < 2; i++)
                for (j = 0; j < 2; j++)
                        bpf_rbtree_add(&groot_array[i], &nodes[i][j]->node, less);
        for (j = 0; j < 2; j++)
                bpf_rbtree_add(&groot_array_one[0], &nodes[2][j]->node, less);
        res1 = bpf_rbtree_remove(&groot_array[0], &nodes[0][0]->node);
        res2 = bpf_rbtree_remove(&groot_array[1], &nodes[1][0]->node);
        res3 = bpf_rbtree_remove(&groot_array_one[0], &nodes[2][0]->node);
        bpf_spin_unlock(&glock);

        if (res1) {
                n = container_of(res1, struct node_data, node);
                k1 = n->key;
                bpf_obj_drop(n);
        }
        if (res2) {
                n = container_of(res2, struct node_data, node);
                k2 = n->key;
                bpf_obj_drop(n);
        }
        if (res3) {
                n = container_of(res3, struct node_data, node);
                k3 = n->key;
                bpf_obj_drop(n);
        }
        if (k1 != 0 || k2 != 2 || k3 != 4)
                return 2;
        return 0;

err_out:
        for (i = 0; i < 3; i++) {
                for (j = 0; j < 2; j++) {
                        if (nodes[i][j])
                                bpf_obj_drop(nodes[i][j]);
                }
        }
        return 1;
}
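
/* Check bpf_rbtree_first(): the node with the smallest key (per less())
 * must come out first, both before and after removing the current minimum.
 * first_data[] records the data field of the minimum at each point and
 * removed_key the key that was removed.
 */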
SEC("tc")
long rbtree_first_and_remove(void *ctx)
{
        struct bpf_rb_node *res = NULL;
        struct node_data *n, *m, *o;

        n = bpf_obj_new(typeof(*n));
        if (!n)
                return 1;
        n->key = 3;
        n->data = 4;

        m = bpf_obj_new(typeof(*m));
        if (!m) {
                bpf_obj_drop(n);
                return 1;
        }
        m->key = 5;
        m->data = 6;

        o = bpf_obj_new(typeof(*o));
        if (!o) {
                bpf_obj_drop(n);
                bpf_obj_drop(m);
                return 1;
        }
        o->key = 1;
        o->data = 2;

        bpf_spin_lock(&glock);
        bpf_rbtree_add(&groot, &n->node, less);
        bpf_rbtree_add(&groot, &m->node, less);
        bpf_rbtree_add(&groot, &o->node, less);

        res = bpf_rbtree_first(&groot);
        if (!res) {
                bpf_spin_unlock(&glock);
                return 2;
        }
        o = container_of(res, struct node_data, node);
        first_data[0] = o->data;

        res = bpf_rbtree_remove(&groot, &o->node);
        bpf_spin_unlock(&glock);
        if (!res)
                return 3;
        o = container_of(res, struct node_data, node);
        removed_key = o->key;
        bpf_obj_drop(o);

        bpf_spin_lock(&glock);
        res = bpf_rbtree_first(&groot);
        if (!res) {
                bpf_spin_unlock(&glock);
                return 4;
        }
        o = container_of(res, struct node_data, node);
        first_data[1] = o->data;
        bpf_spin_unlock(&glock);
        return 0;
}
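
/* Two non-owning references (m and o) alias the same node without the
 * verifier knowing it. The first bpf_rbtree_remove() takes the node out of
 * the tree and returns an owning reference; the second remove on the alias
 * must fail at runtime and return NULL.
 */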
SEC("tc")
long rbtree_api_release_aliasing(void *ctx)
{
        struct node_data *n, *m, *o;
        struct bpf_rb_node *res, *res2;

        n = bpf_obj_new(typeof(*n));
        if (!n)
                return 1;
        n->key = 41;
        n->data = 42;

        bpf_spin_lock(&glock);
        bpf_rbtree_add(&groot, &n->node, less);
        bpf_spin_unlock(&glock);

        bpf_spin_lock(&glock);

        /* m and o point to the same node,
         * but the verifier doesn't know this
         */
        res = bpf_rbtree_first(&groot);
        if (!res)
                goto err_out;
        o = container_of(res, struct node_data, node);

        res = bpf_rbtree_first(&groot);
        if (!res)
                goto err_out;
        m = container_of(res, struct node_data, node);

        res = bpf_rbtree_remove(&groot, &m->node);
        /* The previous remove returns an owning reference to m, which is the
         * same node the non-owning ref o is pointing at. We can safely try to
         * remove o as well: the second rbtree_remove will return NULL since
         * the node is no longer in a tree.
         *
         * Previously we relied on the verifier type system + rbtree_remove
         * invalidating non-owning refs to ensure that rbtree_remove couldn't
         * fail, but now rbtree_remove does runtime checking so we no longer
         * invalidate non-owning refs after remove.
         */
        res2 = bpf_rbtree_remove(&groot, &o->node);

        bpf_spin_unlock(&glock);

        if (res) {
                o = container_of(res, struct node_data, node);
                first_data[0] = o->data;
                bpf_obj_drop(o);
        }
        if (res2) {
                /* The second remove fails, so res2 is NULL and this branch
                 * doesn't execute
                 */
                m = container_of(res2, struct node_data, node);
                first_data[1] = m->data;
                bpf_obj_drop(m);
        }
        return 0;

err_out:
        bpf_spin_unlock(&glock);
        return 1;
}

char _license[] SEC("license") = "GPL";
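
/* Usage sketch (not part of this BPF object): one way a userspace runner
 * could load this object and test-run rbtree_add_nodes with libbpf. This is
 * only a sketch under assumptions: the skeleton header "rbtree.skel.h" and
 * struct name "rbtree" follow the usual bpftool-generated naming, and the
 * real selftest harness may differ.
 *
 *	#include <bpf/libbpf.h>
 *	#include "rbtree.skel.h"
 *
 *	int run_rbtree_add_nodes(void)
 *	{
 *		char pkt[64] = {};			// dummy packet for a tc prog
 *		LIBBPF_OPTS(bpf_test_run_opts, opts,
 *			    .data_in = pkt,
 *			    .data_size_in = sizeof(pkt),
 *			    .repeat = 1);
 *		struct rbtree *skel;
 *		int err;
 *
 *		skel = rbtree__open_and_load();
 *		if (!skel)
 *			return -1;
 *		err = bpf_prog_test_run_opts(
 *			bpf_program__fd(skel->progs.rbtree_add_nodes), &opts);
 *		// On success, opts.retval holds the program's return value and
 *		// skel->data->less_callback_ran should now read 1.
 *		rbtree__destroy(skel);
 *		return err ?: (int)opts.retval;
 *	}
 */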