// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Google */
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>

#include "../../mm/slab.h" /* kmem_cache, slab_caches and slab_mutex */

/* open-coded version */
struct bpf_iter_kmem_cache {
	__u64 __opaque[1];
} __attribute__((aligned(8)));

struct bpf_iter_kmem_cache_kern {
	struct kmem_cache *pos;
} __attribute__((aligned(8)));

#define KMEM_CACHE_POS_START	((void *)1L)

__bpf_kfunc_start_defs();

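/**
 * bpf_iter_kmem_cache_new() - Initialize a new kmem_cache iterator
 * @it: the iterator to initialize
 *
 * Prime the iterator with the KMEM_CACHE_POS_START sentinel so that the
 * first call to bpf_iter_kmem_cache_next() starts from the head of
 * slab_caches.  Returns 0.
 */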
__bpf_kfunc int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it)
{
	struct bpf_iter_kmem_cache_kern *kit = (void *)it;

	BUILD_BUG_ON(sizeof(*kit) > sizeof(*it));
	BUILD_BUG_ON(__alignof__(*kit) != __alignof__(*it));

	kit->pos = KMEM_CACHE_POS_START;
	return 0;
}

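/**
 * bpf_iter_kmem_cache_next() - Advance the iterator to the next kmem_cache
 * @it: the iterator
 *
 * Under slab_mutex, pick the entry following the current position, grab a
 * reference on it and release the reference on the previous entry,
 * destroying it if that reference was the last one.
 *
 * Returns the next kmem_cache, or NULL when the end of slab_caches is
 * reached.
 */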
__bpf_kfunc struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it)
{
	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
	struct kmem_cache *prev = kit->pos;
	struct kmem_cache *next;
	bool destroy = false;

	if (!prev)
		return NULL;

	mutex_lock(&slab_mutex);

	if (list_empty(&slab_caches)) {
		mutex_unlock(&slab_mutex);
		return NULL;
	}

	if (prev == KMEM_CACHE_POS_START)
		next = list_first_entry(&slab_caches, struct kmem_cache, list);
	else if (list_last_entry(&slab_caches, struct kmem_cache, list) == prev)
		next = NULL;
	else
		next = list_next_entry(prev, list);

	/* boot_caches have negative refcount, don't touch them */
	if (next && next->refcount > 0)
		next->refcount++;

	/* Skip kmem_cache_destroy() for active entries */
	if (prev && prev != KMEM_CACHE_POS_START) {
		if (prev->refcount > 1)
			prev->refcount--;
		else if (prev->refcount == 1)
			destroy = true;
	}

	mutex_unlock(&slab_mutex);

	if (destroy)
		kmem_cache_destroy(prev);

	kit->pos = next;
	return next;
}

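/**
 * bpf_iter_kmem_cache_destroy() - Clean up an unfinished kmem_cache iterator
 * @it: the iterator to destroy
 *
 * Drop the reference held on the current position, if any, destroying the
 * cache when that reference was the last one.
 */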
__bpf_kfunc void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it)
{
	struct bpf_iter_kmem_cache_kern *kit = (void *)it;
	struct kmem_cache *s = kit->pos;
	bool destroy = false;

	if (s == NULL || s == KMEM_CACHE_POS_START)
		return;

	mutex_lock(&slab_mutex);

	/* Skip kmem_cache_destroy() for active entries */
	if (s->refcount > 1)
		s->refcount--;
	else if (s->refcount == 1)
		destroy = true;

	mutex_unlock(&slab_mutex);

	if (destroy)
		kmem_cache_destroy(s);
}

__bpf_kfunc_end_defs();

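/*
 * The three kfuncs above form a regular open-coded iterator triplet.  As a
 * rough sketch (illustrative only, not part of this file), a BPF program
 * could walk all slab caches like this:
 *
 *	struct bpf_iter_kmem_cache it;
 *	struct kmem_cache *s;
 *
 *	bpf_iter_kmem_cache_new(&it);
 *	while ((s = bpf_iter_kmem_cache_next(&it)) != NULL)
 *		bpf_printk("%s: size %u", s->name, s->size);
 *	bpf_iter_kmem_cache_destroy(&it);
 */
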
struct bpf_iter__kmem_cache {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct kmem_cache *, s);
};

union kmem_cache_iter_priv {
	struct bpf_iter_kmem_cache it;
	struct bpf_iter_kmem_cache_kern kit;
};

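/*
 * seq_file interface for the "kmem_cache" BPF iterator target.  The seq_file
 * private data embeds the open-coded iterator so that the next/stop callbacks
 * can reuse its reference-count handling.
 */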
static void *kmem_cache_iter_seq_start(struct seq_file *seq, loff_t *pos)
{
	loff_t cnt = 0;
	bool found = false;
	struct kmem_cache *s;
	union kmem_cache_iter_priv *p = seq->private;

	mutex_lock(&slab_mutex);

	/* Find an entry at the given position in the slab_caches list instead
	 * of keeping a reference (of the last visited entry, if any) out of
	 * slab_mutex. It might miss something if one is deleted in the middle
	 * while it releases the lock. But it should be rare and there's not
	 * much we can do about it.
	 */
	list_for_each_entry(s, &slab_caches, list) {
		if (cnt == *pos) {
			/* Make sure this entry remains in the list by getting
			 * a new reference count. Note that boot_cache entries
			 * have a negative refcount, so don't touch them.
			 */
			if (s->refcount > 0)
				s->refcount++;
			found = true;
			break;
		}
		cnt++;
	}
	mutex_unlock(&slab_mutex);

	if (!found)
		return NULL;

	p->kit.pos = s;
	return s;
}

static void kmem_cache_iter_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__kmem_cache ctx = {
		.meta = &meta,
		.s = v,
	};
	union kmem_cache_iter_priv *p = seq->private;
	struct bpf_prog *prog;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, true);
	if (prog && !ctx.s)
		bpf_iter_run_prog(prog, &ctx);

	bpf_iter_kmem_cache_destroy(&p->it);
}

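/* The next callback only bumps the seq_file position; the actual list walk
 * and reference counting are delegated to the open-coded iterator above.
 */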
static void *kmem_cache_iter_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	union kmem_cache_iter_priv *p = seq->private;

	++*pos;

	return bpf_iter_kmem_cache_next(&p->it);
}

static int kmem_cache_iter_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_iter__kmem_cache ctx = {
		.meta = &meta,
		.s = v,
	};
	struct bpf_prog *prog;
	int ret = 0;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	if (prog)
		ret = bpf_iter_run_prog(prog, &ctx);

	return ret;
}

static const struct seq_operations kmem_cache_iter_seq_ops = {
	.start	= kmem_cache_iter_seq_start,
	.next	= kmem_cache_iter_seq_next,
	.stop	= kmem_cache_iter_seq_stop,
	.show	= kmem_cache_iter_seq_show,
};

BTF_ID_LIST_GLOBAL_SINGLE(bpf_kmem_cache_btf_id, struct, kmem_cache)

static const struct bpf_iter_seq_info kmem_cache_iter_seq_info = {
	.seq_ops		= &kmem_cache_iter_seq_ops,
	.seq_priv_size		= sizeof(union kmem_cache_iter_priv),
};

static void bpf_iter_kmem_cache_show_fdinfo(const struct bpf_iter_aux_info *aux,
					    struct seq_file *seq)
{
	seq_puts(seq, "kmem_cache iter\n");
}

DEFINE_BPF_ITER_FUNC(kmem_cache, struct bpf_iter_meta *meta,
		     struct kmem_cache *s)

static struct bpf_iter_reg bpf_kmem_cache_reg_info = {
	.target			= "kmem_cache",
	.feature		= BPF_ITER_RESCHED,
	.show_fdinfo		= bpf_iter_kmem_cache_show_fdinfo,
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__kmem_cache, s),
		  PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
	},
	.seq_info		= &kmem_cache_iter_seq_info,
};

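/* bpf_kmem_cache_btf_id[] is filled in by resolve_btfids at build time; copy
 * it into the context argument info before registering the iterator target.
 */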
static int __init bpf_kmem_cache_iter_init(void)
{
	bpf_kmem_cache_reg_info.ctx_arg_info[0].btf_id = bpf_kmem_cache_btf_id[0];
	return bpf_iter_reg_target(&bpf_kmem_cache_reg_info);
}

late_initcall(bpf_kmem_cache_iter_init);