net/ipv4/bpf_tcp_ca.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

/* "extern" is to avoid sparse warning.  It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;

static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;

static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	return 0;
}

static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
		if (member_offset == unsupported_ops[i])
			return true;
	}

	return false;
}

extern struct btf *btf_vmlinux;

static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (!bpf_tracing_btf_ctx_access(off, size, type, prog, info))
		return false;

	if (base_type(info->reg_type) == PTR_TO_BTF_ID &&
	    !bpf_type_has_unsafe_modifiers(info->reg_type) &&
	    info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}
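
/* Illustrative sketch (not part of this file): because is_valid_access()
 * promotes the ctx pointer's BTF id from "sock" to "tcp_sock", a
 * bpf-tcp-cc program can cast the sock argument and dereference tcp_sock
 * fields directly.  The op and field choices below are assumptions:
 *
 *	SEC("struct_ops/example_cong_avoid")
 *	void BPF_PROG(example_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
 *	{
 *		struct tcp_sock *tp = (struct tcp_sock *)sk;
 *
 *		if (tp->snd_cwnd < tp->snd_ssthresh)
 *			tp->snd_cwnd += acked;
 *	}
 */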

static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct bpf_reg_state *reg,
					int off, int size)
{
	const struct btf_type *t;
	size_t end;

	t = btf_type_by_id(reg->btf, reg->btf_id);
	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	switch (off) {
	case offsetof(struct sock, sk_pacing_rate):
		end = offsetofend(struct sock, sk_pacing_rate);
		break;
	case offsetof(struct sock, sk_pacing_status):
		end = offsetofend(struct sock, sk_pacing_status);
		break;
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	case offsetof(struct tcp_sock, app_limited):
		end = offsetofend(struct tcp_sock, app_limited);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return 0;
}
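
/* Illustrative sketch (assumption, not part of this file): the switch
 * above is a write whitelist.  From a bpf-tcp-cc program,
 *
 *	tp->snd_cwnd = 10;	accepted: snd_cwnd is a listed member
 *	tp->srtt_us = 0;	rejected at load time with -EACCES,
 *				"no write support to tcp_sock at off ..."
 *
 * and a store that straddles the end of a listed member is likewise
 * refused by the off + size > end check.
 */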

BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func		= bpf_tcp_send_ack,
	.gpl_only	= false,
	/* In case we want to report error later */
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &tcp_sock_id,
	.arg2_type	= ARG_ANYTHING,
};
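
/* Illustrative sketch (not part of this file): a bpf-tcp-cc op can invoke
 * this helper with the trusted tcp_sock pointer from its ctx, e.g.
 *
 *	bpf_tcp_send_ack(tp, tp->rcv_nxt);
 *
 * where tp is the (struct tcp_sock *) derived from the sock argument as
 * shown earlier; which op calls it, and when, is an assumption here.
 */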

static u32 prog_ops_moff(const struct bpf_prog *prog)
{
	const struct btf_member *m;
	const struct btf_type *t;
	u32 midx;

	midx = prog->expected_attach_type;
	t = bpf_tcp_congestion_ops.type;
	m = &btf_type_member(t)[midx];

	return __btf_member_bit_offset(t, m) / 8;
}

static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_setsockopt:
		/* Does not allow release() to call setsockopt.
		 * release() is called when the current bpf-tcp-cc
		 * is retiring.  It is not allowed to call
		 * setsockopt() to make further changes which
		 * may potentially allocate new resources.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_setsockopt_proto;
		return NULL;
	case BPF_FUNC_getsockopt:
		/* Since get/setsockopt is usually expected to
		 * be available together, disable getsockopt for
		 * release also to avoid usage surprise.
		 * The bpf-tcp-cc already has a more powerful way
		 * to read tcp_sock from the PTR_TO_BTF_ID.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_getsockopt_proto;
		return NULL;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}
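
/* Illustrative sketch (not part of this file): setsockopt is usable from
 * every op except release.  For example, an init op may tune the socket;
 * the option chosen here is an arbitrary assumption:
 *
 *	SEC("struct_ops/example_init")
 *	void BPF_PROG(example_init, struct sock *sk)
 *	{
 *		int no_delay = 1;
 *
 *		bpf_setsockopt(sk, SOL_TCP, TCP_NODELAY,
 *			       &no_delay, sizeof(no_delay));
 *	}
 *
 * The identical call in a SEC("struct_ops/example_release") program would
 * fail verification because get_func_proto() returns NULL there.
 */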

BTF_SET8_START(bpf_tcp_ca_check_kfunc_ids)
BTF_ID_FLAGS(func, tcp_reno_ssthresh)
BTF_ID_FLAGS(func, tcp_reno_cong_avoid)
BTF_ID_FLAGS(func, tcp_reno_undo_cwnd)
BTF_ID_FLAGS(func, tcp_slow_start)
BTF_ID_FLAGS(func, tcp_cong_avoid_ai)
BTF_SET8_END(bpf_tcp_ca_check_kfunc_ids)

static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_tcp_ca_check_kfunc_ids,
};
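
/* Illustrative sketch (not part of this file): a bpf-tcp-cc can reuse the
 * kernel's Reno logic by declaring these kfuncs with __ksym, e.g.
 *
 *	extern __u32 tcp_reno_ssthresh(struct sock *sk) __ksym;
 *
 *	SEC("struct_ops/example_ssthresh")
 *	__u32 BPF_PROG(example_ssthresh, struct sock *sk)
 *	{
 *		return tcp_reno_ssthresh(sk);
 *	}
 *
 * This mirrors how the in-tree selftest CCs delegate to Reno; the op and
 * program names are assumptions.
 */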

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto		= bpf_tcp_ca_get_func_proto,
	.is_valid_access	= bpf_tcp_ca_is_valid_access,
	.btf_struct_access	= bpf_tcp_ca_btf_struct_access,
};

static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = __btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
				     sizeof(tcp_ca->name)) <= 0)
			return -EINVAL;
		return 1;
	}

	return 0;
}
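
/* Illustrative sketch (not part of this file): the flags and name handled
 * above come from the user-space struct_ops map in the BPF object, e.g.
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops example_ca = {
 *		.ssthresh	= (void *)example_ssthresh,
 *		.cong_avoid	= (void *)example_cong_avoid,
 *		.undo_cwnd	= (void *)example_undo_cwnd,
 *		.name		= "example_ca",
 *	};
 *
 * All member and program names here are assumptions.  Only name and flags
 * are copied by init_member() (returning 1); everything else is left to
 * the generic struct_ops handling (returning 0).
 */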

static int bpf_tcp_ca_check_member(const struct btf_type *t,
				   const struct btf_member *member,
				   const struct bpf_prog *prog)
{
	if (is_unsupported(__btf_member_bit_offset(t, member) / 8))
		return -ENOTSUPP;
	return 0;
}
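
/* Illustrative sketch (not part of this file): attaching a program for an
 * unsupported member fails.  For instance, a hypothetical
 *
 *	SEC("struct_ops/example_get_info")
 *
 * program wired to .get_info would be rejected here with -ENOTSUPP,
 * since get_info is listed in unsupported_ops above.
 */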

static int bpf_tcp_ca_reg(void *kdata)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
	tcp_unregister_congestion_control(kdata);
}

static int bpf_tcp_ca_update(void *kdata, void *old_kdata)
{
	return tcp_update_congestion_control(kdata, old_kdata);
}

static int bpf_tcp_ca_validate(void *kdata)
{
	return tcp_validate_congestion_control(kdata);
}

struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops = &bpf_tcp_ca_verifier_ops,
	.reg = bpf_tcp_ca_reg,
	.unreg = bpf_tcp_ca_unreg,
	.update = bpf_tcp_ca_update,
	.check_member = bpf_tcp_ca_check_member,
	.init_member = bpf_tcp_ca_init_member,
	.init = bpf_tcp_ca_init,
	.validate = bpf_tcp_ca_validate,
	.name = "tcp_congestion_ops",
};
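
/* Illustrative sketch (not part of this file): user space drives reg/unreg
 * through the struct_ops map, e.g. with libbpf:
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_map__attach_struct_ops(skel->maps.example_ca);
 *	if (!link)
 *		return -errno;
 *
 * Attaching calls .reg(); destroying the link calls .unreg().  The
 * skeleton and map names are assumptions.
 */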

static int __init bpf_tcp_ca_kfunc_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
}
late_initcall(bpf_tcp_ca_kfunc_init);