1 // SPDX-License-Identifier: GPL-2.0-only
3 * bpf_jit_comp64.c: eBPF JIT compiler
8 * Based on the powerpc classic BPF JIT compiler by Matt Evans
10 #include <linux/moduleloader.h>
11 #include <asm/cacheflush.h>
12 #include <asm/asm-compat.h>
13 #include <linux/netdevice.h>
14 #include <linux/filter.h>
15 #include <linux/if_vlan.h>
16 #include <asm/kprobes.h>
17 #include <linux/bpf.h>
19 #include "bpf_jit64.h"
21 static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
23 memset32(area, BREAKPOINT_INSTRUCTION, size/4);
26 static inline void bpf_flush_icache(void *start, void *end)
29 flush_icache_range((unsigned long)start, (unsigned long)end);
32 static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
34 return (ctx->seen & (1 << (31 - b2p[i])));
37 static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
39 ctx->seen |= (1 << (31 - b2p[i]));
42 static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
45 * We only need a stack frame if:
46 * - we call other functions (kernel helpers), or
47 * - the bpf program uses its stack area
48 * The latter condition is deduced from the usage of BPF_REG_FP
50 return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
54 * When not setting up our own stackframe, the redzone usage is:
56 * [ prev sp ] <-------------
* [ ... ] |
58 * sp (r1) ---> [ stack pointer ] --------------
59 * [ nv gpr save area ] 6*8
* [ tail_call_cnt ] 8
* [ local_tmp_var ] 8
62 * [ unused red zone ] 208 bytes protected
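/*
 * Illustrative offsets, assuming BPF_PPC_STACK_SAVE is 6*8 = 48 bytes
 * (see bpf_jit64.h): with no stack frame of our own, the local temp
 * slot resolves to r1 - 64 and tail_call_cnt to r1 - 56, both within
 * the ABI-protected red zone below the stack pointer.
 */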
64 static int bpf_jit_stack_local(struct codegen_context *ctx)
66 if (bpf_has_stack_frame(ctx))
67 return STACK_FRAME_MIN_SIZE + ctx->stack_size;
69 return -(BPF_PPC_STACK_SAVE + 16);
72 static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
74 return bpf_jit_stack_local(ctx) + 8;
77 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
79 if (reg >= BPF_PPC_NVR_MIN && reg < 32)
80 return (bpf_has_stack_frame(ctx) ?
81 (BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
- (8 * (32 - reg));
84 pr_err("BPF JIT is asking about unknown registers");
88 static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
93 * Initialize tail_call_cnt if we do tail calls.
94 * Otherwise, put in NOPs so that it can be skipped when we are
95 * invoked through a tail call.
97 if (ctx->seen & SEEN_TAILCALL) {
98 EMIT(PPC_RAW_LI(b2p[TMP_REG_1], 0));
99 /* this goes in the redzone */
100 PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
106 #define BPF_TAILCALL_PROLOGUE_SIZE 8
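/*
 * The two instructions above (li + std) are 8 bytes, which is what
 * BPF_TAILCALL_PROLOGUE_SIZE accounts for: tail calls branch past
 * them (see bpf_jit_emit_tail_call() below) so the callee reuses the
 * caller's tail_call_cnt instead of resetting it.
 */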
108 if (bpf_has_stack_frame(ctx)) {
110 * We need a stack frame, but we don't necessarily need to
111 * save/restore LR unless we call other functions
113 if (ctx->seen & SEEN_FUNC) {
114 EMIT(PPC_INST_MFLR | __PPC_RT(R0));
115 PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
118 PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
122 * Back up non-volatile regs -- BPF registers 6-10
123 * If we haven't created our own stack frame, we save these
124 * in the protected zone below the previous stack frame
126 for (i = BPF_REG_6; i <= BPF_REG_10; i++)
127 if (bpf_is_seen_register(ctx, i))
128 PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
130 /* Setup frame pointer to point to the bpf stack area */
131 if (bpf_is_seen_register(ctx, BPF_REG_FP))
132 EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1,
133 STACK_FRAME_MIN_SIZE + ctx->stack_size));
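/*
 * For a program that calls helpers and uses its stack, the prologue
 * emitted above looks roughly like this (register numbers assumed
 * from the b2p[] mapping in bpf_jit64.h, with r31 as the BPF frame
 * pointer):
 *	mflr	r0
 *	std	r0, 16(r1)
 *	stdu	r1, -(BPF_PPC_STACKFRAME + stack_size)(r1)
 *	std	r27..r31, <save area>	(only the registers actually seen)
 *	addi	r31, r1, STACK_FRAME_MIN_SIZE + stack_size
 */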
136 static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
141 for (i = BPF_REG_6; i <= BPF_REG_10; i++)
142 if (bpf_is_seen_register(ctx, i))
143 PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
145 /* Tear down our stack frame */
146 if (bpf_has_stack_frame(ctx)) {
147 EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size));
148 if (ctx->seen & SEEN_FUNC) {
149 PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
150 EMIT(PPC_RAW_MTLR(0));
155 static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
157 bpf_jit_emit_common_epilogue(image, ctx);
159 /* Move result to r3 */
160 EMIT(PPC_RAW_MR(3, b2p[BPF_REG_0]));
165 static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
168 #ifdef PPC64_ELF_ABI_v1
169 /* func points to the function descriptor */
170 PPC_LI64(b2p[TMP_REG_2], func);
171 /* Load actual entry point from function descriptor */
172 PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
173 /* ... and move it to LR */
174 EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
176 * Load TOC from function descriptor at offset 8.
177 * We can clobber r2 since we get called through a
178 * function pointer (so caller will save/restore r2)
179 * and since we don't use a TOC ourselves.
181 PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
#else
183 /* We can clobber r12 */
184 PPC_FUNC_ADDR(12, func);
185 EMIT(PPC_RAW_MTLR(12));
#endif
187 EMIT(PPC_RAW_BLRL());
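/*
 * Background: an ELF ABI v1 function descriptor is three doublewords
 * { entry point, TOC pointer, environment pointer }, which is why the
 * entry is loaded from offset 0 and the TOC from offset 8 above. On
 * ELF ABI v2 the symbol address is the entry point itself, so the
 * target is loaded into r12 directly.
 */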
190 static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
193 unsigned int i, ctx_idx = ctx->idx;
195 /* Load function address into r12 */
PPC_LI64(12, func);
198 /* For bpf-to-bpf function calls, the callee's address is unknown
199 * until the last extra pass. As seen above, we use PPC_LI64() to
200 * load the callee's address, but PPC_LI64() may emit fewer
201 * instructions depending on the value of the address.
203 * Since we don't want the number of instructions emitted to change,
204 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
205 * we always have a five-instruction sequence, which is the maximum
206 * that PPC_LI64() can emit.
208 for (i = ctx->idx - ctx_idx; i < 5; i++)
EMIT(PPC_RAW_NOP());
211 #ifdef PPC64_ELF_ABI_v1
213 * Load TOC from function descriptor at offset 8.
214 * We can clobber r2 since we get called through a
215 * function pointer (so caller will save/restore r2)
216 * and since we don't use a TOC ourselves.
218 PPC_BPF_LL(2, 12, 8);
219 /* Load actual entry point from function descriptor */
220 PPC_BPF_LL(12, 12, 0);
#endif
223 EMIT(PPC_RAW_MTLR(12));
224 EMIT(PPC_RAW_BLRL());
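/*
 * The padded load above means the address load always occupies five
 * instructions; a full 64-bit PPC_LI64() expansion is typically
 * lis/ori/sldi/oris/ori, and shorter expansions are topped up with
 * NOPs so branch offsets stay stable across passes.
 */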
227 static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
230 * By now, the eBPF program has already setup parameters in r3, r4 and r5
231 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
232 * r4/BPF_REG_2 - pointer to bpf_array
233 * r5/BPF_REG_3 - index in bpf_array
235 int b2p_bpf_array = b2p[BPF_REG_2];
236 int b2p_index = b2p[BPF_REG_3];
239 * if (index >= array->map.max_entries)
242 EMIT(PPC_RAW_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
243 EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
244 EMIT(PPC_RAW_CMPLW(b2p_index, b2p[TMP_REG_1]));
245 PPC_BCC(COND_GE, out);
248 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
251 PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
252 EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT));
253 PPC_BCC(COND_GT, out);
258 EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1));
259 PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
261 /* prog = array->ptrs[index]; */
262 EMIT(PPC_RAW_MULI(b2p[TMP_REG_1], b2p_index, 8));
263 EMIT(PPC_RAW_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array));
264 PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
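/*
 * ptrs[] is the array of prog pointers at the end of struct bpf_array,
 * so the entry lives at array + offsetof(struct bpf_array, ptrs) +
 * index * 8 -- hence the muli/add followed by the displacement load
 * above.
 */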
270 EMIT(PPC_RAW_CMPLDI(b2p[TMP_REG_1], 0));
271 PPC_BCC(COND_EQ, out);
273 /* goto *(prog->bpf_func + prologue_size); */
274 PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
275 #ifdef PPC64_ELF_ABI_v1
276 /* skip past the function descriptor */
277 EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
278 FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE));
#else
280 EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE));
#endif
282 EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
284 /* tear down stack, restore NVRs, ... */
285 bpf_jit_emit_common_epilogue(image, ctx);
287 EMIT(PPC_RAW_BCTR());
291 /* Assemble the body code between the prologue & epilogue */
292 static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
293 struct codegen_context *ctx,
294 u32 *addrs, bool extra_pass)
296 const struct bpf_insn *insn = fp->insnsi;
300 /* Start of epilogue code - will only be valid 2nd pass onwards */
301 u32 exit_addr = addrs[flen];
303 for (i = 0; i < flen; i++) {
304 u32 code = insn[i].code;
305 u32 dst_reg = b2p[insn[i].dst_reg];
306 u32 src_reg = b2p[insn[i].src_reg];
307 s16 off = insn[i].off;
308 s32 imm = insn[i].imm;
309 bool func_addr_fixed;
316 * addrs[] maps a BPF bytecode address into a real offset from
317 * the start of the body code.
319 addrs[i] = ctx->idx * 4;
322 * As an optimization, we note down which non-volatile registers
323 * are used so that we can only save/restore those in our
324 * prologue and epilogue. We do this here regardless of whether
325 * the actual BPF instruction uses src/dst registers or not
326 * (for instance, BPF_CALL does not use them). The expectation
327 * is that those instructions will have src_reg/dst_reg set to
328 * 0. Even otherwise, we just lose some prologue/epilogue
329 * optimization but everything else should work without any issues.
332 if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
333 bpf_set_seen_register(ctx, insn[i].dst_reg);
334 if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
335 bpf_set_seen_register(ctx, insn[i].src_reg);
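/*
 * Example: a program that only ever touches BPF_REG_0..BPF_REG_5
 * (all mapped to volatile GPRs in the assumed b2p[] mapping) never
 * marks a non-volatile register as seen, so the prologue/epilogue
 * skip the save/restore loop entirely.
 */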
339 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
341 case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
342 case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
343 EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
344 goto bpf_alu32_trunc;
345 case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
346 case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
347 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
348 goto bpf_alu32_trunc;
349 case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
350 case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
351 case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
352 case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
353 if (BPF_OP(code) == BPF_SUB)
356 if (imm >= -32768 && imm < 32768)
357 EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
359 PPC_LI32(b2p[TMP_REG_1], imm);
360 EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
363 goto bpf_alu32_trunc;
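/*
 * Example: dst += 0x12345 does not fit the signed 16-bit addi field,
 * so the immediate is first materialized in TMP_REG_1 via PPC_LI32()
 * and then added with a register-register add.
 */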
364 case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
365 case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
366 if (BPF_CLASS(code) == BPF_ALU)
367 EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
369 EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
370 goto bpf_alu32_trunc;
371 case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
372 case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
373 if (imm >= -32768 && imm < 32768)
374 EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
376 PPC_LI32(b2p[TMP_REG_1], imm);
377 if (BPF_CLASS(code) == BPF_ALU)
378 EMIT(PPC_RAW_MULW(dst_reg, dst_reg,
381 EMIT(PPC_RAW_MULD(dst_reg, dst_reg,
384 goto bpf_alu32_trunc;
385 case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
386 case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
387 if (BPF_OP(code) == BPF_MOD) {
388 EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg));
389 EMIT(PPC_RAW_MULW(b2p[TMP_REG_1], src_reg,
391 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
393 EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
394 goto bpf_alu32_trunc;
395 case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
396 case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
397 if (BPF_OP(code) == BPF_MOD) {
398 EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg));
399 EMIT(PPC_RAW_MULD(b2p[TMP_REG_1], src_reg,
401 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
403 EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
405 case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
406 case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
407 case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
408 case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
if (imm == 0)
return -EINVAL;
else if (imm == 1)
412 goto bpf_alu32_trunc;
414 PPC_LI32(b2p[TMP_REG_1], imm);
415 switch (BPF_CLASS(code)) {
417 if (BPF_OP(code) == BPF_MOD) {
418 EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_2],
421 EMIT(PPC_RAW_MULW(b2p[TMP_REG_1],
424 EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
427 EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg,
431 if (BPF_OP(code) == BPF_MOD) {
432 EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_2],
435 EMIT(PPC_RAW_MULD(b2p[TMP_REG_1],
438 EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
441 EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg,
445 goto bpf_alu32_trunc;
446 case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
447 case BPF_ALU64 | BPF_NEG: /* dst = -dst */
448 EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
449 goto bpf_alu32_trunc;
452 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
454 case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
455 case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
456 EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
457 goto bpf_alu32_trunc;
458 case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
459 case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
461 EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
464 PPC_LI32(b2p[TMP_REG_1], imm);
465 EMIT(PPC_RAW_AND(dst_reg, dst_reg, b2p[TMP_REG_1]));
467 goto bpf_alu32_trunc;
468 case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
469 case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
470 EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
471 goto bpf_alu32_trunc;
472 case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
473 case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
474 if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
476 PPC_LI32(b2p[TMP_REG_1], imm);
477 EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_1]));
480 EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
482 EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
484 goto bpf_alu32_trunc;
485 case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
486 case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
487 EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
488 goto bpf_alu32_trunc;
489 case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
490 case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
491 if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
493 PPC_LI32(b2p[TMP_REG_1], imm);
494 EMIT(PPC_RAW_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]));
497 EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
499 EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
501 goto bpf_alu32_trunc;
502 case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
503 /* slw clears top 32 bits */
504 EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
505 /* skip zero extension move, but set address map. */
506 if (insn_is_zext(&insn[i + 1]))
507 addrs[++i] = ctx->idx * 4;
509 case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
510 EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
512 case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
513 /* with imm 0, we still need to clear top 32 bits */
514 EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
515 if (insn_is_zext(&insn[i + 1]))
516 addrs[++i] = ctx->idx * 4;
518 case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
520 EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
522 case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
523 EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
524 if (insn_is_zext(&insn[i + 1]))
525 addrs[++i] = ctx->idx * 4;
527 case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
528 EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
530 case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
531 EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
532 if (insn_is_zext(&insn[i + 1]))
533 addrs[++i] = ctx->idx * 4;
535 case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
537 EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
539 case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
540 EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
541 goto bpf_alu32_trunc;
542 case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
543 EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
545 case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
546 EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
547 goto bpf_alu32_trunc;
548 case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
550 EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
556 case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
557 case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
559 /* special mov32 for zext */
560 EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
563 EMIT(PPC_RAW_MR(dst_reg, src_reg));
564 goto bpf_alu32_trunc;
565 case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
566 case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
567 PPC_LI32(dst_reg, imm);
569 goto bpf_alu32_trunc;
570 else if (insn_is_zext(&insn[i + 1]))
571 addrs[++i] = ctx->idx * 4;
575 /* Truncate to 32-bits */
576 if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
577 EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
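/*
 * rlwinm dst, dst, 0, 0, 31 rotates by zero and keeps only the low
 * 32-bit word, clearing bits 63..32 and giving the wrap-around
 * semantics that 32-bit BPF_ALU operations require.
 */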
583 case BPF_ALU | BPF_END | BPF_FROM_LE:
584 case BPF_ALU | BPF_END | BPF_FROM_BE:
585 #ifdef __BIG_ENDIAN__
586 if (BPF_SRC(code) == BPF_FROM_BE)
588 #else /* !__BIG_ENDIAN__ */
589 if (BPF_SRC(code) == BPF_FROM_LE)
594 /* Rotate 8 bits left & mask with 0x0000ff00 */
595 EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23));
596 /* Rotate 8 bits right & insert LSB to reg */
597 EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31));
598 /* Move result back to dst_reg */
599 EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
603 * Rotate word left by 8 bits:
604 * 2 bytes are already in their final position
605 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
607 EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31));
608 /* Rotate 24 bits and insert byte 1 */
609 EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7));
610 /* Rotate 24 bits and insert byte 3 */
611 EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23));
612 EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
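/*
 * Worked example for the 32-bit swap: with dst = 0x11223344 the
 * rotate gives 0x22334411, inserting byte 0x44 at the top gives
 * 0x44334411, and inserting 0x22 into bits 16-23 yields 0x44332211.
 */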
616 * Simpler (and likely faster) to store the value to the
617 * stack and then byte-swap it back with ldbrx.
619 * ctx->seen will be reliable in pass2, but
620 * the instructions generated will remain the
621 * same across all passes
623 PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
624 EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
625 EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
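/*
 * ldbrx loads the doubleword back byte-reversed, so spilling dst to
 * the scratch slot from bpf_jit_stack_local() and reloading it this
 * way performs the full 64-bit swap without juggling it in registers.
 */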
633 /* zero-extend 16 bits into 64 bits */
634 EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
635 if (insn_is_zext(&insn[i + 1]))
636 addrs[++i] = ctx->idx * 4;
639 if (!fp->aux->verifier_zext)
640 /* zero-extend 32 bits into 64 bits */
641 EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
652 case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
653 case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
654 if (BPF_CLASS(code) == BPF_ST) {
655 EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
656 src_reg = b2p[TMP_REG_1];
658 EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
660 case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
661 case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
662 if (BPF_CLASS(code) == BPF_ST) {
663 EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
664 src_reg = b2p[TMP_REG_1];
666 EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
668 case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
669 case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
670 if (BPF_CLASS(code) == BPF_ST) {
671 PPC_LI32(b2p[TMP_REG_1], imm);
672 src_reg = b2p[TMP_REG_1];
674 EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
676 case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
677 case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
678 if (BPF_CLASS(code) == BPF_ST) {
679 PPC_LI32(b2p[TMP_REG_1], imm);
680 src_reg = b2p[TMP_REG_1];
682 PPC_BPF_STL(src_reg, dst_reg, off);
686 * BPF_STX ATOMIC (atomic ops)
688 case BPF_STX | BPF_ATOMIC | BPF_W:
689 if (imm != BPF_ADD) {
691 "eBPF filter atomic op code %02x (@%d) unsupported\n",
696 /* *(u32 *)(dst + off) += src */
698 /* Get EA into TMP_REG_1 */
699 EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
700 tmp_idx = ctx->idx * 4;
701 /* load value from memory into TMP_REG_2 */
702 EMIT(PPC_RAW_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
703 /* add value from src_reg into this */
704 EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
705 /* store result back */
706 EMIT(PPC_RAW_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
707 /* we're done if this succeeded */
708 PPC_BCC_SHORT(COND_NE, tmp_idx);
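/*
 * lwarx/stwcx. form the usual PowerPC atomic read-modify-write loop:
 * lwarx sets a reservation, and if another write intervenes the
 * stwcx. fails (CR0[EQ] clear), so the conditional branch above loops
 * back to tmp_idx and retries.
 */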
710 case BPF_STX | BPF_ATOMIC | BPF_DW:
711 if (imm != BPF_ADD) {
713 "eBPF filter atomic op code %02x (@%d) unsupported\n",
717 /* *(u64 *)(dst + off) += src */
719 EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
720 tmp_idx = ctx->idx * 4;
721 EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
722 EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
723 EMIT(PPC_RAW_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
724 PPC_BCC_SHORT(COND_NE, tmp_idx);
730 /* dst = *(u8 *)(ul) (src + off) */
731 case BPF_LDX | BPF_MEM | BPF_B:
732 EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
733 if (insn_is_zext(&insn[i + 1]))
734 addrs[++i] = ctx->idx * 4;
736 /* dst = *(u16 *)(ul) (src + off) */
737 case BPF_LDX | BPF_MEM | BPF_H:
738 EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
739 if (insn_is_zext(&insn[i + 1]))
740 addrs[++i] = ctx->idx * 4;
742 /* dst = *(u32 *)(ul) (src + off) */
743 case BPF_LDX | BPF_MEM | BPF_W:
744 EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
745 if (insn_is_zext(&insn[i + 1]))
746 addrs[++i] = ctx->idx * 4;
748 /* dst = *(u64 *)(ul) (src + off) */
749 case BPF_LDX | BPF_MEM | BPF_DW:
750 PPC_BPF_LL(dst_reg, src_reg, off);
755 * 16 byte instruction that uses two 'struct bpf_insn'
757 case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
758 imm64 = ((u64)(u32) insn[i].imm) |
759 (((u64)(u32) insn[i+1].imm) << 32);
760 /* Adjust for two bpf instructions */
761 addrs[++i] = ctx->idx * 4;
762 PPC_LI64(dst_reg, imm64);
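/*
 * The 64-bit immediate is split by the verifier across two
 * struct bpf_insn slots; the halves are recombined above and addrs[]
 * gets an entry for the second slot so later jump offsets remain
 * correct.
 */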
768 case BPF_JMP | BPF_EXIT:
770 * If this isn't the very last instruction, branch to
771 * the epilogue. If we _are_ the last instruction,
772 * we'll just fall through to the epilogue.
776 /* else fall through to the epilogue */
780 * Call kernel helper or bpf function
782 case BPF_JMP | BPF_CALL:
783 ctx->seen |= SEEN_FUNC;
785 ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
786 &func_addr, &func_addr_fixed);
791 bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
793 bpf_jit_emit_func_call_rel(image, ctx, func_addr);
794 /* move return value from r3 to BPF_REG_0 */
795 EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3));
801 case BPF_JMP | BPF_JA:
802 PPC_JMP(addrs[i + 1 + off]);
805 case BPF_JMP | BPF_JGT | BPF_K:
806 case BPF_JMP | BPF_JGT | BPF_X:
807 case BPF_JMP | BPF_JSGT | BPF_K:
808 case BPF_JMP | BPF_JSGT | BPF_X:
809 case BPF_JMP32 | BPF_JGT | BPF_K:
810 case BPF_JMP32 | BPF_JGT | BPF_X:
811 case BPF_JMP32 | BPF_JSGT | BPF_K:
812 case BPF_JMP32 | BPF_JSGT | BPF_X:
815 case BPF_JMP | BPF_JLT | BPF_K:
816 case BPF_JMP | BPF_JLT | BPF_X:
817 case BPF_JMP | BPF_JSLT | BPF_K:
818 case BPF_JMP | BPF_JSLT | BPF_X:
819 case BPF_JMP32 | BPF_JLT | BPF_K:
820 case BPF_JMP32 | BPF_JLT | BPF_X:
821 case BPF_JMP32 | BPF_JSLT | BPF_K:
822 case BPF_JMP32 | BPF_JSLT | BPF_X:
825 case BPF_JMP | BPF_JGE | BPF_K:
826 case BPF_JMP | BPF_JGE | BPF_X:
827 case BPF_JMP | BPF_JSGE | BPF_K:
828 case BPF_JMP | BPF_JSGE | BPF_X:
829 case BPF_JMP32 | BPF_JGE | BPF_K:
830 case BPF_JMP32 | BPF_JGE | BPF_X:
831 case BPF_JMP32 | BPF_JSGE | BPF_K:
832 case BPF_JMP32 | BPF_JSGE | BPF_X:
835 case BPF_JMP | BPF_JLE | BPF_K:
836 case BPF_JMP | BPF_JLE | BPF_X:
837 case BPF_JMP | BPF_JSLE | BPF_K:
838 case BPF_JMP | BPF_JSLE | BPF_X:
839 case BPF_JMP32 | BPF_JLE | BPF_K:
840 case BPF_JMP32 | BPF_JLE | BPF_X:
841 case BPF_JMP32 | BPF_JSLE | BPF_K:
842 case BPF_JMP32 | BPF_JSLE | BPF_X:
845 case BPF_JMP | BPF_JEQ | BPF_K:
846 case BPF_JMP | BPF_JEQ | BPF_X:
847 case BPF_JMP32 | BPF_JEQ | BPF_K:
848 case BPF_JMP32 | BPF_JEQ | BPF_X:
851 case BPF_JMP | BPF_JNE | BPF_K:
852 case BPF_JMP | BPF_JNE | BPF_X:
853 case BPF_JMP32 | BPF_JNE | BPF_K:
854 case BPF_JMP32 | BPF_JNE | BPF_X:
857 case BPF_JMP | BPF_JSET | BPF_K:
858 case BPF_JMP | BPF_JSET | BPF_X:
859 case BPF_JMP32 | BPF_JSET | BPF_K:
860 case BPF_JMP32 | BPF_JSET | BPF_X:
866 case BPF_JMP | BPF_JGT | BPF_X:
867 case BPF_JMP | BPF_JLT | BPF_X:
868 case BPF_JMP | BPF_JGE | BPF_X:
869 case BPF_JMP | BPF_JLE | BPF_X:
870 case BPF_JMP | BPF_JEQ | BPF_X:
871 case BPF_JMP | BPF_JNE | BPF_X:
872 case BPF_JMP32 | BPF_JGT | BPF_X:
873 case BPF_JMP32 | BPF_JLT | BPF_X:
874 case BPF_JMP32 | BPF_JGE | BPF_X:
875 case BPF_JMP32 | BPF_JLE | BPF_X:
876 case BPF_JMP32 | BPF_JEQ | BPF_X:
877 case BPF_JMP32 | BPF_JNE | BPF_X:
878 /* unsigned comparison */
879 if (BPF_CLASS(code) == BPF_JMP32)
880 EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
882 EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
884 case BPF_JMP | BPF_JSGT | BPF_X:
885 case BPF_JMP | BPF_JSLT | BPF_X:
886 case BPF_JMP | BPF_JSGE | BPF_X:
887 case BPF_JMP | BPF_JSLE | BPF_X:
888 case BPF_JMP32 | BPF_JSGT | BPF_X:
889 case BPF_JMP32 | BPF_JSLT | BPF_X:
890 case BPF_JMP32 | BPF_JSGE | BPF_X:
891 case BPF_JMP32 | BPF_JSLE | BPF_X:
892 /* signed comparison */
893 if (BPF_CLASS(code) == BPF_JMP32)
894 EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
896 EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
898 case BPF_JMP | BPF_JSET | BPF_X:
899 case BPF_JMP32 | BPF_JSET | BPF_X:
900 if (BPF_CLASS(code) == BPF_JMP) {
901 EMIT(PPC_RAW_AND_DOT(b2p[TMP_REG_1], dst_reg,
904 int tmp_reg = b2p[TMP_REG_1];
906 EMIT(PPC_RAW_AND(tmp_reg, dst_reg, src_reg));
907 EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0,
911 case BPF_JMP | BPF_JNE | BPF_K:
912 case BPF_JMP | BPF_JEQ | BPF_K:
913 case BPF_JMP | BPF_JGT | BPF_K:
914 case BPF_JMP | BPF_JLT | BPF_K:
915 case BPF_JMP | BPF_JGE | BPF_K:
916 case BPF_JMP | BPF_JLE | BPF_K:
917 case BPF_JMP32 | BPF_JNE | BPF_K:
918 case BPF_JMP32 | BPF_JEQ | BPF_K:
919 case BPF_JMP32 | BPF_JGT | BPF_K:
920 case BPF_JMP32 | BPF_JLT | BPF_K:
921 case BPF_JMP32 | BPF_JGE | BPF_K:
922 case BPF_JMP32 | BPF_JLE | BPF_K:
924 bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
927 * Need sign-extended load, so only positive
928 * values can be used as imm in cmpldi
930 if (imm >= 0 && imm < 32768) {
932 EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
934 EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
936 /* sign-extending load */
937 PPC_LI32(b2p[TMP_REG_1], imm);
938 /* ... but unsigned comparison */
940 EMIT(PPC_RAW_CMPLW(dst_reg,
943 EMIT(PPC_RAW_CMPLD(dst_reg,
948 case BPF_JMP | BPF_JSGT | BPF_K:
949 case BPF_JMP | BPF_JSLT | BPF_K:
950 case BPF_JMP | BPF_JSGE | BPF_K:
951 case BPF_JMP | BPF_JSLE | BPF_K:
952 case BPF_JMP32 | BPF_JSGT | BPF_K:
953 case BPF_JMP32 | BPF_JSLT | BPF_K:
954 case BPF_JMP32 | BPF_JSGE | BPF_K:
955 case BPF_JMP32 | BPF_JSLE | BPF_K:
957 bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
960 * signed comparison, so any 16-bit value
961 * can be used in cmpdi
963 if (imm >= -32768 && imm < 32768) {
965 EMIT(PPC_RAW_CMPWI(dst_reg, imm));
967 EMIT(PPC_RAW_CMPDI(dst_reg, imm));
969 PPC_LI32(b2p[TMP_REG_1], imm);
971 EMIT(PPC_RAW_CMPW(dst_reg,
974 EMIT(PPC_RAW_CMPD(dst_reg,
979 case BPF_JMP | BPF_JSET | BPF_K:
980 case BPF_JMP32 | BPF_JSET | BPF_K:
981 /* andi does not sign-extend the immediate */
982 if (imm >= 0 && imm < 32768)
983 /* PPC_ANDI is _only/always_ dot-form */
984 EMIT(PPC_RAW_ANDI(b2p[TMP_REG_1], dst_reg, imm));
986 int tmp_reg = b2p[TMP_REG_1];
988 PPC_LI32(tmp_reg, imm);
989 if (BPF_CLASS(code) == BPF_JMP) {
990 EMIT(PPC_RAW_AND_DOT(tmp_reg, dst_reg,
993 EMIT(PPC_RAW_AND(tmp_reg, dst_reg,
995 EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg,
1001 PPC_BCC(true_cond, addrs[i + 1 + off]);
1007 case BPF_JMP | BPF_TAIL_CALL:
1008 ctx->seen |= SEEN_TAILCALL;
1009 bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
1014 * The filter contains something cruel & unusual.
1015 * We don't handle it, but also there shouldn't be
1016 * anything missing from our list.
1018 pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
1024 /* Set end-of-body-code address for exit. */
1025 addrs[i] = ctx->idx * 4;
1030 /* Fix the branch target addresses for subprog calls */
1031 static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
1032 struct codegen_context *ctx, u32 *addrs)
1034 const struct bpf_insn *insn = fp->insnsi;
1035 bool func_addr_fixed;
1040 for (i = 0; i < fp->len; i++) {
1042 * During the extra pass, only the branch target addresses for
1043 * the subprog calls need to be fixed. All other instructions
1044 * can be left untouched.
1046 * The JITed image length does not change because we already
1047 * ensure that the JITed instruction sequences for these calls
1048 * are of fixed length by padding them with NOPs.
1050 if (insn[i].code == (BPF_JMP | BPF_CALL) &&
1051 insn[i].src_reg == BPF_PSEUDO_CALL) {
1052 ret = bpf_jit_get_func_addr(fp, &insn[i], true,
1059 * Save ctx->idx as this would currently point to the
1060 * end of the JITed image and set it to the offset of
1061 * the instruction sequence corresponding to the
1062 * subprog call temporarily.
1065 ctx->idx = addrs[i] / 4;
1066 bpf_jit_emit_func_call_rel(image, ctx, func_addr);
1069 * Restore ctx->idx here. This is safe as the length
1070 * of the JITed sequence remains unchanged.
1079 struct powerpc64_jit_data {
1080 struct bpf_binary_header *header;
1084 struct codegen_context ctx;
1087 bool bpf_jit_needs_zext(void)
1092 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
1099 struct powerpc64_jit_data *jit_data;
1100 struct codegen_context cgctx;
1103 struct bpf_binary_header *bpf_hdr;
1104 struct bpf_prog *org_fp = fp;
1105 struct bpf_prog *tmp_fp;
1106 bool bpf_blinded = false;
1107 bool extra_pass = false;
1109 if (!fp->jit_requested)
1112 tmp_fp = bpf_jit_blind_constants(org_fp);
1116 if (tmp_fp != org_fp) {
1121 jit_data = fp->aux->jit_data;
1123 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
1128 fp->aux->jit_data = jit_data;
1132 addrs = jit_data->addrs;
1134 cgctx = jit_data->ctx;
1135 image = jit_data->image;
1136 bpf_hdr = jit_data->header;
1137 proglen = jit_data->proglen;
1138 alloclen = proglen + FUNCTION_DESCR_SIZE;
1143 addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
1144 if (addrs == NULL) {
1149 memset(&cgctx, 0, sizeof(struct codegen_context));
1151 /* Make sure that the stack is quadword aligned. */
1152 cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
1154 /* Scouting faux-generate pass 0 */
1155 if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
1156 /* We hit something illegal or unsupported. */
1162 * If we have seen a tail call, we need a second pass.
1163 * This is because bpf_jit_emit_common_epilogue() is called
1164 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
1166 if (cgctx.seen & SEEN_TAILCALL) {
1168 if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
1175 * Pretend to build prologue, given the features we've seen. This will
1176 * update cgctx.idx as it pretends to output instructions, then we can
1177 * calculate total size from idx.
1179 bpf_jit_build_prologue(0, &cgctx);
1180 bpf_jit_build_epilogue(0, &cgctx);
1182 proglen = cgctx.idx * 4;
1183 alloclen = proglen + FUNCTION_DESCR_SIZE;
1185 bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
1186 bpf_jit_fill_ill_insns);
1193 code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
1197 * Do not touch the prologue and epilogue as they will remain
1198 * unchanged. Only fix the branch target address for subprog
1199 * calls in the body.
1201 * This does not change the offsets and lengths of the subprog
1202 * call instruction sequences and hence, the size of the JITed
1205 bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);
1207 /* There is no need to perform the usual passes. */
1208 goto skip_codegen_passes;
1211 /* Code generation passes 1-2 */
1212 for (pass = 1; pass < 3; pass++) {
1213 /* Now build the prologue, body code & epilogue for real. */
1215 bpf_jit_build_prologue(code_base, &cgctx);
1216 bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
1217 bpf_jit_build_epilogue(code_base, &cgctx);
1219 if (bpf_jit_enable > 1)
1220 pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
1221 proglen - (cgctx.idx * 4), cgctx.seen);
1224 skip_codegen_passes:
1225 if (bpf_jit_enable > 1)
1227 * Note that we output the base address of the code_base
1228 * rather than image, since opcodes are in code_base.
1230 bpf_jit_dump(flen, proglen, pass, code_base);
1232 #ifdef PPC64_ELF_ABI_v1
1233 /* Function descriptor nastiness: Address + TOC */
1234 ((u64 *)image)[0] = (u64)code_base;
1235 ((u64 *)image)[1] = local_paca->kernel_toc;
#endif
1238 fp->bpf_func = (void *)image;
fp->jited = 1;
1240 fp->jited_len = alloclen;
1242 bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
1243 if (!fp->is_func || extra_pass) {
1244 bpf_prog_fill_jited_linfo(fp, addrs);
1248 fp->aux->jit_data = NULL;
1250 jit_data->addrs = addrs;
1251 jit_data->ctx = cgctx;
1252 jit_data->proglen = proglen;
1253 jit_data->image = image;
1254 jit_data->header = bpf_hdr;
1259 bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
1264 /* Overriding bpf_jit_free() as we don't set images read-only. */
1265 void bpf_jit_free(struct bpf_prog *fp)
1267 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
1268 struct bpf_binary_header *bpf_hdr = (void *)addr;
if (fp->jited)
1271 bpf_jit_binary_free(bpf_hdr);
1273 bpf_prog_unlock_free(fp);