/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <stdarg.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/cryptohash.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
#include <linux/if_vlan.h>

#include <net/sch_generic.h>

#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;
struct xdp_rxq_info;
struct xdp_buff;
struct sock_reuseport;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_2	/* scratch reg */
#define BPF_REG_D	BPF_REG_8	/* data, callee-saved */
#define BPF_REG_H	BPF_REG_9	/* hlen, callee-saved */

/* Kernel hidden auxiliary/helper register for hardening step.
 * Only used by eBPF JITs. It's nothing more than a temporary
 * register that JITs use internally, only that here it's part
 * of eBPF instructions that have been rewritten for blinding
 * constants. See JIT pre-step in bpf_jit_blind_constants().
 */
#define BPF_REG_AX		MAX_BPF_REG
#define MAX_BPF_JIT_REG		(MAX_BPF_REG + 1)

/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL	0xf0

/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS	0xe0

/* As per nm, we expose JITed images as text (code) section for
 * kallsyms. That way, tools like perf can find it to match
 * addresses.
 */
#define BPF_SYM_ELF_TYPE	't'

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = LEN })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

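/* Note: BPF_LD_IMM64() expands to *two* struct bpf_insn initializers,
 * so a single use fills two consecutive array slots. Illustrative
 * sketch only (hypothetical register/immediate, not from this file):
 *
 *	struct bpf_insn insns[2] = {
 *		BPF_LD_IMM64(BPF_REG_1, 0x1234567890abcdefULL),
 *	};
 *
 * BPF_LD_MAP_FD() uses the same two-insn encoding with src_reg set to
 * BPF_PSEUDO_MAP_FD, so the loader knows imm carries a map fd rather
 * than a plain immediate.
 */
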
/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
		.dst_reg = 0,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Unconditional jumps, goto pc + off16 */

#define BPF_JMP_A(OFF)						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_JA,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Relative call */

#define BPF_CALL_REL(TGT)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = BPF_PSEUDO_CALL,			\
		.off   = 0,					\
		.imm   = TGT })

/* Function call */

#define BPF_CAST_CALL(x)					\
		((u64 (*)(u64, u64, u64, u64, u64))(x))

#define BPF_EMIT_CALL(FUNC)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((FUNC) - __bpf_call_base) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })

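/* Illustrative sketch of these initializers in use: a tiny eBPF
 * program that returns 1 when its context argument (r1) is zero and
 * 0 otherwise. Hypothetical array, shown for clarity only:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
 *		BPF_MOV64_IMM(BPF_REG_0, 1),
 *		BPF_EXIT_INSN(),
 *	};
 */
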
/* Internal classic blocks for direct assignment */

#define __BPF_STMT(CODE, K)					\
	((struct sock_filter) BPF_STMT(CODE, K))

#define __BPF_JUMP(CODE, K, JT, JF)				\
	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))

#define bytes_to_bpf_size(bytes)				\
({								\
	int bpf_size = -EINVAL;					\
								\
	if (bytes == sizeof(u8))				\
		bpf_size = BPF_B;				\
	else if (bytes == sizeof(u16))				\
		bpf_size = BPF_H;				\
	else if (bytes == sizeof(u32))				\
		bpf_size = BPF_W;				\
	else if (bytes == sizeof(u64))				\
		bpf_size = BPF_DW;				\
								\
	bpf_size;						\
})

#define bpf_size_to_bytes(bpf_size)				\
({								\
	int bytes = -EINVAL;					\
								\
	if (bpf_size == BPF_B)					\
		bytes = sizeof(u8);				\
	else if (bpf_size == BPF_H)				\
		bytes = sizeof(u16);				\
	else if (bpf_size == BPF_W)				\
		bytes = sizeof(u32);				\
	else if (bpf_size == BPF_DW)				\
		bytes = sizeof(u64);				\
								\
	bytes;							\
})

#define BPF_SIZEOF(type)					\
({								\
	const int __size = bytes_to_bpf_size(sizeof(type));	\
	BUILD_BUG_ON(__size < 0);				\
	__size;							\
})

#define BPF_FIELD_SIZEOF(type, field)				\
({								\
	const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
	BUILD_BUG_ON(__size < 0);				\
	__size;							\
})

#define BPF_LDST_BYTES(insn)					\
({								\
	const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \
	WARN_ON(__size < 0);					\
	__size;							\
})

#define __BPF_MAP_0(m, v, ...) v
#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)

#define __BPF_REG_0(...) __BPF_PAD(5)
#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)

#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)

#define __BPF_CAST(t, a)						\
	(__force t)							\
	(__force							\
	 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \
				      (unsigned long)0, (t)0))) a
#define __BPF_V void
#define __BPF_N

#define __BPF_DECL_ARGS(t, a) t   a
#define __BPF_DECL_REGS(t, a) u64 a

#define __BPF_PAD(n)							\
	__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
		  u64, __ur_3, u64, __ur_4, u64, __ur_5)

#define BPF_CALL_x(x, name, ...)					\
	static __always_inline						\
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));	\
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))	\
	{								\
		return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
	}								\
	static __always_inline						\
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))

#define BPF_CALL_0(name, ...)	BPF_CALL_x(0, name, __VA_ARGS__)
#define BPF_CALL_1(name, ...)	BPF_CALL_x(1, name, __VA_ARGS__)
#define BPF_CALL_2(name, ...)	BPF_CALL_x(2, name, __VA_ARGS__)
#define BPF_CALL_3(name, ...)	BPF_CALL_x(3, name, __VA_ARGS__)
#define BPF_CALL_4(name, ...)	BPF_CALL_x(4, name, __VA_ARGS__)
#define BPF_CALL_5(name, ...)	BPF_CALL_x(5, name, __VA_ARGS__)

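/* Illustrative sketch of the BPF_CALL_x() machinery: defining a
 * two-argument helper. The visible symbol takes five u64 registers
 * (unused ones padded via __BPF_PAD()), while the inner ____-prefixed
 * function keeps natural C types. Hypothetical helper name, not part
 * of this file:
 *
 *	BPF_CALL_2(bpf_example_add, u32, a, u32, b)
 *	{
 *		return a + b;
 *	}
 */
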
#define bpf_ctx_range(TYPE, MEMBER)					\
	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2)			\
	offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1

#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE)			\
	({								\
		BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE));	\
		*(PTR_SIZE) = (SIZE);					\
		offsetof(TYPE, MEMBER);					\
	})

#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
	u16		len;
	compat_uptr_t	filter;	/* struct sock_filter * */
};
#endif

struct sock_fprog_kern {
	u16			len;
	struct sock_filter	*filter;
};

struct bpf_binary_header {
	u32 pages;
	/* Some arches need word alignment for their instructions */
	u8 image[] __aligned(4);
};

struct bpf_prog {
	u16			pages;		/* Number of allocated pages */
	u16			jited:1,	/* Is our filter JIT'ed? */
				jit_requested:1,/* archs need to JIT the prog */
				undo_set_mem:1,	/* Passed set_memory_ro() checkpoint */
				gpl_compatible:1, /* Is filter GPL compatible? */
				cb_access:1,	/* Is control block accessed? */
				dst_needed:1,	/* Do we need dst entry? */
				blinded:1,	/* Was blinded */
				is_func:1,	/* program is a bpf function */
				kprobe_override:1, /* Do we override a kprobe? */
				has_callchain_buf:1; /* callchain buffer allocated? */
	enum bpf_prog_type	type;		/* Type of BPF program */
	enum bpf_attach_type	expected_attach_type; /* For some prog types */
	u32			len;		/* Number of filter blocks */
	u32			jited_len;	/* Size of jited insns in bytes */
	u8			tag[BPF_TAG_SIZE];
	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
	unsigned int		(*bpf_func)(const void *ctx,
					    const struct bpf_insn *insn);
	/* Instructions for interpreter */
	union {
		struct sock_filter	insns[0];
		struct bpf_insn		insnsi[0];
	};
};

struct sk_filter {
	refcount_t	refcnt;
	struct rcu_head	rcu;
	struct bpf_prog	*prog;
};

#define BPF_PROG_RUN(filter, ctx)  (*(filter)->bpf_func)(ctx, (filter)->insnsi)

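/* Minimal usage sketch for BPF_PROG_RUN(): callers must hold
 * rcu_read_lock() so the program cannot be released while still
 * running (see the comment in bpf_prog_run_xdp() below).
 * Hypothetical snippet, shown for clarity only:
 *
 *	u32 ret;
 *
 *	rcu_read_lock();
 *	ret = BPF_PROG_RUN(prog, skb);
 *	rcu_read_unlock();
 */
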
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN

struct bpf_skb_data_end {
	struct qdisc_skb_cb qdisc_cb;
	void *data_meta;
	void *data_end;
};

struct bpf_redirect_info {
	u32 ifindex;
	u32 flags;
	struct bpf_map *map;
	struct bpf_map *map_to_flush;
	u32 kern_flags;
};

DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);

/* flags for bpf_redirect_info kern_flags */
#define BPF_RI_F_RF_NO_DIRECT	BIT(0)	/* no napi_direct on return_frame */

/* Compute the linear packet data range [data, data_end) which
 * will be accessed by various program types (cls_bpf, act_bpf,
 * lwt, ...). Subsystems allowing direct data access must (!)
 * ensure that cb[] area can be written to when BPF program is
 * invoked (otherwise cb[] save/restore is necessary).
 */
static inline void bpf_compute_data_pointers(struct sk_buff *skb)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
	cb->data_meta = skb->data - skb_metadata_len(skb);
	cb->data_end  = skb->data + skb_headlen(skb);
}

/* Similar to bpf_compute_data_pointers(), except that the original
 * cb->data_end is saved in *saved_data_end so it can be restored later.
 */
static inline void bpf_compute_and_save_data_end(
	struct sk_buff *skb, void **saved_data_end)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	*saved_data_end = cb->data_end;
	cb->data_end  = skb->data + skb_headlen(skb);
}

/* Restore data saved by bpf_compute_and_save_data_end(). */
static inline void bpf_restore_data_end(
	struct sk_buff *skb, void *saved_data_end)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	cb->data_end = saved_data_end;
}

static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
	/* eBPF programs may read/write skb->cb[] area to transfer meta
	 * data between tail calls. Since this also needs to work with
	 * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
	 *
	 * In some socket filter cases, the cb unfortunately needs to be
	 * saved/restored so that protocol specific skb->cb[] data won't
	 * be lost. In any case, due to unprivileged eBPF programs
	 * attached to sockets, we need to clear the bpf_skb_cb() area
	 * to not leak previous contents to user space.
	 */
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
		     FIELD_SIZEOF(struct qdisc_skb_cb, data));

	return qdisc_skb_cb(skb)->data;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
				       struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u8 cb_saved[BPF_SKB_CB_LEN];
	u32 res;

	if (unlikely(prog->cb_access)) {
		memcpy(cb_saved, cb_data, sizeof(cb_saved));
		memset(cb_data, 0, sizeof(cb_saved));
	}

	res = BPF_PROG_RUN(prog, skb);

	if (unlikely(prog->cb_access))
		memcpy(cb_data, cb_saved, sizeof(cb_saved));

	return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
					struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);

	if (unlikely(prog->cb_access))
		memset(cb_data, 0, BPF_SKB_CB_LEN);

	return BPF_PROG_RUN(prog, skb);
}

static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
					    struct xdp_buff *xdp)
{
	/* Caller needs to hold rcu_read_lock() (!), otherwise program
	 * can be released while still running, or map elements could be
	 * freed early while still having concurrent users. XDP fastpath
	 * already takes rcu_read_lock() when fetching the program, so
	 * it's not necessary here anymore.
	 */
	return BPF_PROG_RUN(prog, xdp);
}

static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
	return prog->len * sizeof(struct bpf_insn);
}

static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
	return round_up(bpf_prog_insn_size(prog) +
			sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
}

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
	return max(sizeof(struct bpf_prog),
		   offsetof(struct bpf_prog, insns[proglen]));
}

static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
{
	/* When classic BPF programs have been loaded and the arch
	 * does not have a classic BPF JIT (anymore), they have been
	 * converted via bpf_migrate_filter() to eBPF and thus always
	 * have an unspec program type.
	 */
	return prog->type == BPF_PROG_TYPE_UNSPEC;
}

static inline u32 bpf_ctx_off_adjust_machine(u32 size)
{
	const u32 size_machine = sizeof(unsigned long);

	if (size > size_machine && size % size_machine == 0)
		size = size_machine;

	return size;
}

static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
					   u32 size_default)
{
	size_default = bpf_ctx_off_adjust_machine(size_default);
	size_access  = bpf_ctx_off_adjust_machine(size_access);

#ifdef __LITTLE_ENDIAN
	return (off & (size_default - 1)) == 0;
#else
	return (off & (size_default - 1)) + size_access == size_default;
#endif
}

static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
{
	return bpf_ctx_narrow_align_ok(off, size, size_default) &&
	       size <= size_default && (size & (size - 1)) == 0;
}

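/* Worked example (illustrative): for a 4-byte context field
 * (size_default == 4), a 2-byte narrow access passes the alignment
 * check on little-endian only at the field's start (off % 4 == 0),
 * while on big-endian it must end at the field's end (off % 4 == 2).
 * In addition, the access size must be a power of two no larger than
 * size_default.
 */
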
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
	fp->undo_set_mem = 1;
	set_memory_ro((unsigned long)fp, fp->pages);
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
	if (fp->undo_set_mem)
		set_memory_rw((unsigned long)fp, fp->pages);
}

static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
	set_memory_ro((unsigned long)hdr, hdr->pages);
}

static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
{
	set_memory_rw((unsigned long)hdr, hdr->pages);
}

static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{
	unsigned long real_start = (unsigned long)fp->bpf_func;
	unsigned long addr = real_start & PAGE_MASK;

	return (void *)addr;
}

int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	return sk_filter_trim_cap(sk, skb, 1);
}

struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);

bool bpf_opcode_in_insntable(u8 code);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_ro(fp);
	__bpf_prog_free(fp);
}

typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
				       unsigned int flen);

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
		  unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#define __bpf_call_base_args \
	((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
	 __bpf_call_base)

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_pkt_data(void *func);

static inline bool bpf_dump_raw_ok(void)
{
	/* Reconstruction of call-sites depends on kallsyms, so make
	 * dumping subject to the same restriction.
	 */
	return kallsyms_show_value() == 1;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len);

void bpf_clear_redirect_map(struct bpf_map *map);

static inline bool xdp_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
}

static inline void xdp_set_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
}

static inline void xdp_clear_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
}

static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
				 unsigned int pktlen)
{
	unsigned int len;

	if (unlikely(!(fwd->flags & IFF_UP)))
		return -ENETDOWN;

	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
	if (pktlen > len)
		return -EMSGSIZE;

	return 0;
}

/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
 * same cpu context. Further for best results no more than a single map
 * for the do_redirect/do_flush pair should be used. This limitation is
 * because we only track one map and force a flush when the map changes.
 * This does not appear to be a real limitation for existing software.
 */
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
			    struct xdp_buff *xdp, struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev,
		    struct xdp_buff *xdp,
		    struct bpf_prog *prog);
void xdp_do_flush_map(void);

void bpf_warn_invalid_xdp_action(u32 act);

#ifdef CONFIG_INET
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
				  struct bpf_prog *prog, struct sk_buff *skb,
				  u32 hash);
#else
static inline struct sock *
bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
		     struct bpf_prog *prog, struct sk_buff *skb,
		     u32 hash)
{
	return NULL;
}
#endif

#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
extern int bpf_jit_limit;

typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);

void bpf_jit_free(struct bpf_prog *fp);

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed);

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
				u32 pass, void *image)
{
	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
	       proglen, pass, image, current->comm, task_pid_nr(current));

	if (image)
		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
			       16, 1, image, proglen, false);
}

static inline bool bpf_jit_is_ebpf(void)
{
# ifdef CONFIG_HAVE_EBPF_JIT
	return true;
# else
	return false;
# endif
}

static inline bool ebpf_jit_enabled(void)
{
	return bpf_jit_enable && bpf_jit_is_ebpf();
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
	return fp->jited && bpf_jit_is_ebpf();
}

static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
{
	/* These are the prerequisites, should someone ever have the
	 * idea to call blinding outside of them, we make sure to
	 * bail out.
	 */
	if (!bpf_jit_is_ebpf())
		return false;
	if (!prog->jit_requested)
		return false;
	if (!bpf_jit_harden)
		return false;
	if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
		return false;

	return true;
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
	/* There are a couple of corner cases where kallsyms should
	 * not be enabled f.e. on hardening.
	 */
	if (bpf_jit_harden)
		return false;
	if (!bpf_jit_kallsyms)
		return false;
	if (bpf_jit_kallsyms == 1)
		return true;

	return false;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym);
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym);

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	const char *ret = __bpf_address_lookup(addr, size, off, sym);

	if (ret && modname)
		*modname = NULL;
	return ret;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp);
void bpf_prog_kallsyms_del(struct bpf_prog *fp);

#else /* CONFIG_BPF_JIT */

static inline bool ebpf_jit_enabled(void)
{
	return false;
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
	return false;
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_free(fp);
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
	return false;
}

static inline const char *
__bpf_address_lookup(unsigned long addr, unsigned long *size,
		     unsigned long *off, char *sym)
{
	return NULL;
}

static inline bool is_bpf_text_address(unsigned long addr)
{
	return false;
}

static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
				  char *type, char *sym)
{
	return -ERANGE;
}

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	return NULL;
}

static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
}

static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}
#endif /* CONFIG_BPF_JIT */

void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);

#define BPF_ANC		BIT(15)

static inline bool bpf_needs_clear_a(const struct sock_filter *first)
{
	switch (first->code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
		return false;

	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
			return true;
		return false;

	default:
		return true;
	}
}

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
	BUG_ON(ftest->code & BPF_ANC);

	switch (ftest->code) {
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE:	\
				return BPF_ANC | SKF_AD_##CODE
		switch (ftest->k) {
		BPF_ANCILLARY(PROTOCOL);
		BPF_ANCILLARY(PKTTYPE);
		BPF_ANCILLARY(IFINDEX);
		BPF_ANCILLARY(NLATTR);
		BPF_ANCILLARY(NLATTR_NEST);
		BPF_ANCILLARY(MARK);
		BPF_ANCILLARY(QUEUE);
		BPF_ANCILLARY(HATYPE);
		BPF_ANCILLARY(RXHASH);
		BPF_ANCILLARY(CPU);
		BPF_ANCILLARY(ALU_XOR_X);
		BPF_ANCILLARY(VLAN_TAG);
		BPF_ANCILLARY(VLAN_TAG_PRESENT);
		BPF_ANCILLARY(PAY_OFFSET);
		BPF_ANCILLARY(RANDOM);
		BPF_ANCILLARY(VLAN_TPID);
		}
		/* Fallthrough. */
	default:
		return ftest->code;
	}
}

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
					   int k, unsigned int size);

static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
				     unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);

	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}

static inline int bpf_tell_extensions(void)
{
	return SKF_AD_MAX;
}

struct bpf_sock_addr_kern {
	struct sock *sk;
	struct sockaddr *uaddr;
	/* Temporary "register" to make indirect stores to nested structures
	 * defined above. We need three registers to make such a store, but
	 * only two (src and dst) are available at convert_ctx_access time.
	 */
	u64 tmp_reg;
	void *t_ctx;	/* Attach type specific context. */
};

struct bpf_sock_ops_kern {
	struct	sock *sk;
	u32	op;
	union {
		u32 args[4];
		u32 reply;
		u32 replylong[4];
	};
	u32	is_fullsock;
	u64	temp;			/* temp and everything after is not
					 * initialized to 0 before calling
					 * the BPF program. New fields that
					 * should be initialized to 0 should
					 * be inserted before temp.
					 * temp is scratch storage used by
					 * sock_ops_convert_ctx_access
					 * as temporary storage of a register.
					 */
};

#endif /* __LINUX_FILTER_H__ */