#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>
struct sock;
struct cgroup;
struct sk_buff;
struct bpf_sock_ops_kern;
#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
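/*
 * cgroup_bpf_enabled_key is only enabled once cgroup BPF programs have been
 * attached, so the BPF_CGROUP_RUN_PROG_*() wrappers below reduce to a
 * patched-out static branch and are essentially free while no programs are
 * attached.
 */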
struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
};
struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];
	/* attached progs to this cgroup and attach flags
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
	 * have either zero or one element
	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];
	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
};
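/*
 * Rough relationship between the fields above (sketch; see kernel/bpf/cgroup.c
 * for the authoritative logic): progs[type] holds the programs attached
 * directly to this cgroup, while effective[type] is the flattened,
 * RCU-protected array that is actually executed for this cgroup, including
 * programs inherited from ancestor cgroups. Attach/detach rebuild the
 * effective arrays, using the inactive pointer as scratch space for the
 * not-yet-published copy.
 */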
void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);
/* Wrappers for __cgroup_bpf_*(), protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
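/*
 * Typical attach flow (illustrative sketch, not a call site in this header):
 * the BPF_PROG_ATTACH command of the bpf(2) syscall resolves the cgroup and
 * program fds and ends up in cgroup_bpf_attach() above. From user space this
 * looks roughly like the following, where cgroup_fd and prog_fd are
 * placeholder descriptors for an open cgroup directory and a loaded
 * BPF_PROG_TYPE_CGROUP_* program:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd     = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */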
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
						    BPF_CGROUP_INET_INGRESS); \
	__ret; \
})
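/*
 * Illustrative use (sketch; the real call sites live in the network receive
 * path): the wrapper evaluates to 0 when delivery is allowed and to a
 * negative errno such as -EPERM when an attached program rejects the skb,
 * so a caller can simply do:
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *	if (err)
 *		goto drop;
 */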
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
		typeof(sk) __sk = sk_to_full_sk(sk); \
		if (sk_fullsock(__sk)) \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
							    BPF_CGROUP_INET_EGRESS); \
	} \
	__ret; \
})
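/*
 * The sk_to_full_sk()/sk_fullsock() dance above exists because on the egress
 * path skb->sk is not guaranteed to be a full socket (it can, for instance,
 * be a request socket); the program is only run against the corresponding
 * full socket.
 */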
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled && sk) { \
		__ret = __cgroup_bpf_run_filter_sk(sk, \
						   BPF_CGROUP_INET_SOCK_CREATE); \
	} \
	__ret; \
})
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled && (sock_ops)->sk) { \
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
		if (__sk && sk_fullsock(__sk)) \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
								 sock_ops, \
								 BPF_CGROUP_SOCK_OPS); \
	} \
	__ret; \
})
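/*
 * Note that BPF_CGROUP_RUN_PROG_SOCK_OPS() above uses typeof(sk) even though
 * sk is not one of its parameters; it therefore only expands correctly at
 * call sites where a socket variable named sk is already in scope.
 */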
#else /* !CONFIG_CGROUP_BPF */

struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
/* No-op stubs so call sites do not need CONFIG_CGROUP_BPF ifdefs */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */