// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>

DEFINE_BPF_STORAGE_CACHE(sk_cache);

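/* sk_cache hands out a cache slot index per sk-storage map, so the data
 * of recently used maps can be found via bpf_local_storage->cache[]
 * without walking the per-socket storage list on every lookup.
 */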
static struct bpf_local_storage_data *
bpf_sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_map *smap;

	sk_storage =
		rcu_dereference_check(sk->sk_bpf_storage, bpf_rcu_lock_held());
	if (!sk_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
}

static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = bpf_sk_storage_lookup(sk, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata), true);

	return 0;
}

/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
	struct bpf_local_storage *sk_storage;
	bool free_sk_storage = false;

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage) {
		rcu_read_unlock();
		return;
	}

	raw_spin_lock_bh(&sk_storage->lock);
	free_sk_storage = bpf_local_storage_unlink_nolock(sk_storage);
	raw_spin_unlock_bh(&sk_storage->lock);
	rcu_read_unlock();

	if (free_sk_storage)
		kfree_rcu(sk_storage, rcu);
}

static void bpf_sk_storage_map_free(struct bpf_map *map)
{
	bpf_local_storage_map_free(map, &sk_cache, NULL);
}

static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
	return bpf_local_storage_map_alloc(attr, &sk_cache);
}

static int notsupp_get_next_key(struct bpf_map *map, void *key,
				void *next_key)
{
	return -ENOTSUPP;
}

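/* Syscall-side map access: the map key is a socket fd.  The fd is
 * resolved to a socket below, so these ops only reach sockets that are
 * still referenced from the calling process's fd table.
 */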
static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_sk_storage_lookup(sock->sk, map, true);
		sockfd_put(sock);
		return sdata ? sdata->data : NULL;
	}

	return ERR_PTR(err);
}

static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_local_storage_update(
			sock->sk, (struct bpf_local_storage_map *)map, value,
			map_flags, GFP_ATOMIC);
		sockfd_put(sock);
		return PTR_ERR_OR_ZERO(sdata);
	}

	return err;
}

static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		err = bpf_sk_storage_del(sock->sk, map);
		sockfd_put(sock);
		return err;
	}

	return err;
}

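/* Clone support: bpf_sk_storage_clone() is called from sk_clone_lock()
 * when a child socket is created from a listener.  Only elements that
 * belong to maps created with BPF_F_CLONE are copied to the child.
 */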
static struct bpf_local_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
			  struct bpf_local_storage_map *smap,
			  struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_elem *copy_selem;

	copy_selem = bpf_selem_alloc(smap, newsk, NULL, true, GFP_ATOMIC);
	if (!copy_selem)
		return NULL;

	if (btf_record_has_field(smap->map.record, BPF_SPIN_LOCK))
		copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
				      SDATA(selem)->data, true);
	else
		copy_map_value(&smap->map, SDATA(copy_selem)->data,
			       SDATA(selem)->data);

	return copy_selem;
}

int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
{
	struct bpf_local_storage *new_sk_storage = NULL;
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	int ret = 0;

	RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);

	if (!sk_storage || hlist_empty(&sk_storage->list))
		goto out;

	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		struct bpf_local_storage_elem *copy_selem;
		struct bpf_local_storage_map *smap;
		struct bpf_map *map;

		smap = rcu_dereference(SDATA(selem)->smap);
		if (!(smap->map.map_flags & BPF_F_CLONE))
			continue;

		/* Note that for lockless listeners adding new element
		 * here can race with cleanup in bpf_local_storage_map_free.
		 * Try to grab map refcnt to make sure that it's still
		 * alive and prevent concurrent removal.
		 */
		map = bpf_map_inc_not_zero(&smap->map);
		if (IS_ERR(map))
			continue;

		copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);
		if (!copy_selem) {
			ret = -ENOMEM;
			bpf_map_put(map);
			goto out;
		}

		if (new_sk_storage) {
			bpf_selem_link_map(smap, copy_selem);
			bpf_selem_link_storage_nolock(new_sk_storage, copy_selem);
		} else {
			ret = bpf_local_storage_alloc(newsk, smap, copy_selem, GFP_ATOMIC);
			if (ret) {
				kfree(copy_selem);
				atomic_sub(smap->elem_size,
					   &newsk->sk_omem_alloc);
				bpf_map_put(map);
				goto out;
			}

			new_sk_storage =
				rcu_dereference(copy_selem->local_storage);
		}
		bpf_map_put(map);
	}

out:
	rcu_read_unlock();

	/* In case of an error, don't free anything explicitly here, the
	 * caller is responsible to call bpf_sk_storage_free.
	 */

	return ret;
}

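/* Illustrative BPF-program-side use of the get helper below (not part
 * of this file; the map and function names are examples only):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, __u64);
 *	} pkt_cnt SEC(".maps");
 *
 *	SEC("cgroup/skb")
 *	int count_pkts(struct __sk_buff *skb)
 *	{
 *		struct sock *sk = skb->sk;
 *		__u64 *cnt;
 *
 *		if (!sk)
 *			return 1;
 *		cnt = bpf_sk_storage_get(&pkt_cnt, sk, 0,
 *					 BPF_SK_STORAGE_GET_F_CREATE);
 *		if (cnt)
 *			__sync_fetch_and_add(cnt, 1);
 *		return 1;
 *	}
 */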
/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	struct bpf_local_storage_data *sdata;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
		return (unsigned long)NULL;

	sdata = bpf_sk_storage_lookup(sk, map, true);
	if (sdata)
		return (unsigned long)sdata->data;

	if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
	    /* Cannot add new elem to a going away sk.
	     * Otherwise, the new elem may become a leak
	     * (and also other memory issues during map
	     *  destruction).
	     */
	    refcount_inc_not_zero(&sk->sk_refcnt)) {
		sdata = bpf_local_storage_update(
			sk, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST, gfp_flags);
		/* sk must be a fullsock (guaranteed by verifier),
		 * so sock_gen_put() is unnecessary.
		 */
		sock_put(sk);
		return IS_ERR(sdata) ?
			(unsigned long)NULL : (unsigned long)sdata->data;
	}

	return (unsigned long)NULL;
}

BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!sk || !sk_fullsock(sk))
		return -EINVAL;

	if (refcount_inc_not_zero(&sk->sk_refcnt)) {
		int err;

		err = bpf_sk_storage_del(sk, map);
		sock_put(sk);
		return err;
	}

	return -ENOENT;
}

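/* Storage memory is accounted against the owning socket's optmem
 * budget (sk_omem_alloc, capped by the optmem_max sysctl), mirroring
 * what sock_kmalloc() does.
 */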
static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
				 void *owner, u32 size)
{
	int optmem_max = READ_ONCE(sysctl_optmem_max);
	struct sock *sk = (struct sock *)owner;

	/* same check as in sock_kmalloc() */
	if (size <= optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
		atomic_add(size, &sk->sk_omem_alloc);
		return 0;
	}

	return -ENOMEM;
}

static void bpf_sk_storage_uncharge(struct bpf_local_storage_map *smap,
				    void *owner, u32 size)
{
	struct sock *sk = owner;

	atomic_sub(size, &sk->sk_omem_alloc);
}

static struct bpf_local_storage __rcu **
bpf_sk_storage_ptr(void *owner)
{
	struct sock *sk = owner;

	return &sk->sk_bpf_storage;
}

const struct bpf_map_ops sk_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = bpf_sk_storage_map_alloc,
	.map_free = bpf_sk_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
	.map_update_elem = bpf_fd_sk_storage_update_elem,
	.map_delete_elem = bpf_fd_sk_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_id = &bpf_local_storage_map_btf_id[0],
	.map_local_storage_charge = bpf_sk_storage_charge,
	.map_local_storage_uncharge = bpf_sk_storage_uncharge,
	.map_owner_storage_ptr = bpf_sk_storage_ptr,
};

const struct bpf_func_proto bpf_sk_storage_get_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_CTX, /* context is 'struct sock' */
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_delete_proto = {
	.func		= bpf_sk_storage_delete,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};

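/* The tracing variants of bpf_sk_storage_(get|delete) below may run
 * from fentry/fexit on nearly arbitrary kernel functions.  Attaching
 * them to the bpf_sk_storage*() implementation itself could re-enter
 * the storage internals, so such attach points are rejected here.
 */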
static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
{
	const struct btf *btf_vmlinux;
	const struct btf_type *t;
	const char *tname;
	u32 btf_id;

	if (prog->aux->dst_prog)
		return false;

	/* Ensure the tracing program is not tracing
	 * any bpf_sk_storage*() function and also
	 * use the bpf_sk_storage_(get|delete) helper.
	 */
	switch (prog->expected_attach_type) {
	case BPF_TRACE_ITER:
	case BPF_TRACE_RAW_TP:
		/* bpf_sk_storage has no trace point */
		return true;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		btf_vmlinux = bpf_get_btf_vmlinux();
		if (IS_ERR_OR_NULL(btf_vmlinux))
			return false;
		btf_id = prog->aux->attach_btf_id;
		t = btf_type_by_id(btf_vmlinux, btf_id);
		tname = btf_name_by_offset(btf_vmlinux, t->name_off);
		return !!strncmp(tname, "bpf_sk_storage",
				 strlen("bpf_sk_storage"));
	default:
		return false;
	}

	return false;
}

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (in_hardirq() || in_nmi())
		return (unsigned long)NULL;

	return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags,
						     gfp_flags);
}

BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
	   struct sock *, sk)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (in_hardirq() || in_nmi())
		return -EPERM;

	return ____bpf_sk_storage_delete(map, sk);
}

const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
	.func		= bpf_sk_storage_get_tracing,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID,
	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
	.allowed	= bpf_sk_storage_tracing_allowed,
};

const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
	.func		= bpf_sk_storage_delete_tracing,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID,
	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.allowed	= bpf_sk_storage_tracing_allowed,
};

struct bpf_sk_storage_diag {
	u32 nr_maps;
	struct bpf_map *maps[];
};

/* The reply will be like:
 * INET_DIAG_BPF_SK_STORAGES (nla_nest)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 * ....
 */

static int nla_value_size(u32 value_size)
{
	/* SK_DIAG_BPF_STORAGE (nla_nest)
	 *	SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
	 *	SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
	 */
	return nla_total_size(0) + nla_total_size(sizeof(u32)) +
	       nla_total_size_64bit(value_size);
}

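/* Example sizing for nla_value_size(): with an 8-byte map value this
 * is roughly 4 (empty nest hdr) + 8 (u32 map-id attr) + 12 (64-bit
 * value attr, plus a 4-byte pad attr on arches without efficient
 * unaligned access), i.e. 24..28 bytes per storage entry.
 */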
void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
{
	u32 i;

	if (!diag)
		return;

	for (i = 0; i < diag->nr_maps; i++)
		bpf_map_put(diag->maps[i]);

	kfree(diag);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);

static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
			   const struct bpf_map *map)
{
	u32 i;

	for (i = 0; i < diag->nr_maps; i++) {
		if (diag->maps[i] == map)
			return true;
	}

	return false;
}

struct bpf_sk_storage_diag *
bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
{
	struct bpf_sk_storage_diag *diag;
	struct nlattr *nla;
	u32 nr_maps = 0;
	int rem, err;

	/* bpf_local_storage_map is currently limited to CAP_SYS_ADMIN as
	 * the map_alloc_check() side also does.
	 */
	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			nr_maps++;
	}

	diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL);
	if (!diag)
		return ERR_PTR(-ENOMEM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		struct bpf_map *map;
		int map_fd;

		if (nla_type(nla) != SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			continue;

		map_fd = nla_get_u32(nla);
		map = bpf_map_get(map_fd);
		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto err_free;
		}
		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
			bpf_map_put(map);
			err = -EINVAL;
			goto err_free;
		}
		if (diag_check_dup(diag, map)) {
			bpf_map_put(map);
			err = -EEXIST;
			goto err_free;
		}
		diag->maps[diag->nr_maps++] = map;
	}

	return diag;

err_free:
	bpf_sk_storage_diag_free(diag);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);

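/* diag_get() emits one SK_DIAG_BPF_STORAGE nest (map id + value) for a
 * single storage entry.  The caller must be in an RCU read-side
 * critical section since sdata->smap is rcu_dereference()'d.
 */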
static int diag_get(struct bpf_local_storage_data *sdata, struct sk_buff *skb)
{
	struct nlattr *nla_stg, *nla_value;
	struct bpf_local_storage_map *smap;

	/* It cannot exceed max nlattr's payload */
	BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < BPF_LOCAL_STORAGE_MAX_VALUE_SIZE);

	nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
	if (!nla_stg)
		return -EMSGSIZE;

	smap = rcu_dereference(sdata->smap);
	if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
		goto errout;

	nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
				      smap->map.value_size,
				      SK_DIAG_BPF_STORAGE_PAD);
	if (!nla_value)
		goto errout;

	if (btf_record_has_field(smap->map.record, BPF_SPIN_LOCK))
		copy_map_value_locked(&smap->map, nla_data(nla_value),
				      sdata->data, false);
	else
		copy_map_value(&smap->map, nla_data(nla_value), sdata->data);

	nla_nest_end(skb, nla_stg);
	return 0;

errout:
	nla_nest_cancel(skb, nla_stg);
	return -EMSGSIZE;
}

static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
				       int stg_array_type,
				       unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;

	rcu_read_lock();

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		smap = rcu_dereference(SDATA(selem)->smap);
		diag_size += nla_value_size(smap->map.value_size);

		if (nla_stgs && diag_get(SDATA(selem), skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}

	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}

int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
			    struct sock *sk, struct sk_buff *skb,
			    int stg_array_type,
			    unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_data *sdata;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;
	u32 i;

	*res_diag_size = 0;

	/* No map has been specified.  Dump all. */
	if (!diag->nr_maps)
		return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,
						   res_diag_size);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	for (i = 0; i < diag->nr_maps; i++) {
		sdata = bpf_local_storage_lookup(sk_storage,
				(struct bpf_local_storage_map *)diag->maps[i],
				false);

		if (!sdata)
			continue;

		diag_size += nla_value_size(diag->maps[i]->value_size);

		if (nla_stgs && diag_get(sdata, skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}
	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);

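/* What follows implements the "bpf_sk_storage_map" bpf_iter target: an
 * iterator program walks every (sk, value) pair of one sk-storage map.
 * An illustrative iterator program (names are examples only):
 *
 *	SEC("iter/bpf_sk_storage_map")
 *	int dump_sk_storage(struct bpf_iter__bpf_sk_storage_map *ctx)
 *	{
 *		struct sock *sk = ctx->sk;
 *		__u64 *val = ctx->value;
 *
 *		if (!sk || !val)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "%llu\n", *val);
 *		return 0;
 *	}
 */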
struct bpf_iter_seq_sk_storage_map_info {
	struct bpf_map *map;
	unsigned int bucket_id;
	unsigned skip_elems;
};

static struct bpf_local_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
				 struct bpf_local_storage_elem *prev_selem)
	__acquires(RCU) __releases(RCU)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	u32 skip_elems = info->skip_elems;
	struct bpf_local_storage_map *smap;
	u32 bucket_id = info->bucket_id;
	u32 i, count, n_buckets;
	struct bpf_local_storage_map_bucket *b;

	smap = (struct bpf_local_storage_map *)info->map;
	n_buckets = 1U << smap->bucket_log;
	if (bucket_id >= n_buckets)
		return NULL;

	/* try to find next selem in the same bucket */
	selem = prev_selem;
	count = 0;
	while (selem) {
		selem = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&selem->map_node)),
					 struct bpf_local_storage_elem, map_node);
		if (!selem) {
			/* not found, unlock and go to the next bucket */
			b = &smap->buckets[bucket_id++];
			rcu_read_unlock();
			skip_elems = 0;
			break;
		}
		sk_storage = rcu_dereference(selem->local_storage);
		if (sk_storage) {
			info->skip_elems = skip_elems + count;
			return selem;
		}
		count++;
	}

	for (i = bucket_id; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];
		rcu_read_lock();
		count = 0;
		hlist_for_each_entry_rcu(selem, &b->list, map_node) {
			sk_storage = rcu_dereference(selem->local_storage);
			if (sk_storage && count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return selem;
			}
			count++;
		}
		rcu_read_unlock();
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}

static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_local_storage_elem *selem;

	selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL);
	if (!selem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return selem;
}

static void *bpf_sk_storage_map_seq_next(struct seq_file *seq, void *v,
					 loff_t *pos)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_sk_storage_map_seq_find_next(seq->private, v);
}

struct bpf_iter__bpf_sk_storage_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(struct sock *, sk);
	__bpf_md_ptr(void *, value);
};

DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta,
		     struct bpf_map *map, struct sock *sk,
		     void *value)

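/* The show callback is also invoked one last time with selem == NULL
 * (from the stop callback) so that the iterator program can detect the
 * end of iteration via bpf_iter_get_info().
 */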
static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
					 struct bpf_local_storage_elem *selem)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
	struct bpf_iter__bpf_sk_storage_map ctx = {};
	struct bpf_local_storage *sk_storage;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int ret = 0;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, selem == NULL);
	if (prog) {
		ctx.meta = &meta;
		ctx.map = info->map;
		if (selem) {
			sk_storage = rcu_dereference(selem->local_storage);
			ctx.sk = sk_storage->owner;
			ctx.value = SDATA(selem)->data;
		}
		ret = bpf_iter_run_prog(prog, &ctx);
	}

	return ret;
}

static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_sk_storage_map_seq_show(seq, v);
}

static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	if (!v)
		(void)__bpf_sk_storage_map_seq_show(seq, v);
	else
		rcu_read_unlock();
}

static int bpf_iter_init_sk_storage_map(void *priv_data,
					struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	seq_info->map = aux->map;
	return 0;
}

static void bpf_iter_fini_sk_storage_map(void *priv_data)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
}

static int bpf_iter_attach_map(struct bpf_prog *prog,
			       union bpf_iter_link_info *linfo,
			       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
		goto put_map;

	if (prog->aux->max_rdwr_access > map->value_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static const struct seq_operations bpf_sk_storage_map_seq_ops = {
	.start  = bpf_sk_storage_map_seq_start,
	.next   = bpf_sk_storage_map_seq_next,
	.stop   = bpf_sk_storage_map_seq_stop,
	.show   = bpf_sk_storage_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_sk_storage_map_seq_ops,
	.init_seq_private	= bpf_iter_init_sk_storage_map,
	.fini_seq_private	= bpf_iter_fini_sk_storage_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_sk_storage_map_info),
};

static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
	.target			= "bpf_sk_storage_map",
	.attach_target		= bpf_iter_attach_map,
	.detach_target		= bpf_iter_detach_map,
	.show_fdinfo		= bpf_iter_map_show_fdinfo,
	.fill_link_info		= bpf_iter_map_fill_link_info,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, value),
		  PTR_TO_BUF | PTR_MAYBE_NULL },
	},
	.seq_info		= &iter_seq_info,
};

static int __init bpf_sk_storage_map_iter_init(void)
{
	bpf_sk_storage_map_reg_info.ctx_arg_info[0].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&bpf_sk_storage_map_reg_info);
}
late_initcall(bpf_sk_storage_map_iter_init);