// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include <asm/security_features.h>

#include "bpf_jit64.h"
static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, b2p[BPF_REG_FP]);
}
/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 5*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 16
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 24);
}
static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 16;
}
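
/*
 * Worked example (illustrative), assuming BPF_PPC_STACK_SAVE is 5*8 = 40
 * as in the diagram above: without our own stack frame,
 * bpf_jit_stack_local() is -(40 + 24) = -64(r1), the bottom of the
 * local_tmp_var slot, and bpf_jit_stack_tailcallcnt() is -64 + 16 =
 * -48(r1), matching the -(BPF_PPC_STACK_SAVE + 8) store in the prologue.
 */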
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}
void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
}
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		EMIT(PPC_RAW_LI(b2p[TMP_REG_1], 0));
		/* this goes in the redzone */
		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
	} else {
		EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_NOP());
	}
#define BPF_TAILCALL_PROLOGUE_SIZE	8
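
	/*
	 * Note: BPF_TAILCALL_PROLOGUE_SIZE covers exactly the two
	 * instructions emitted above (2 * 4 bytes), whether they are the
	 * li/std pair or the two nops, so a tail call can branch past them.
	 */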
	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_MFLR(_R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
		}

		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
	}
	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, b2p[i]))
			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, b2p[BPF_REG_FP]))
		EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size));
}
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, b2p[i]))
			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size));
		if (ctx->seen & SEEN_FUNC) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			EMIT(PPC_RAW_MTLR(0));
		}
	}
}
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	EMIT(PPC_RAW_MR(3, b2p[BPF_REG_0]));
	EMIT(PPC_RAW_BLR());
}
static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
				       u64 func)
{
#ifdef PPC64_ELF_ABI_v1
	/* func points to the function descriptor */
	PPC_LI64(b2p[TMP_REG_2], func);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
	/* ... and move it to CTR */
	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourselves.
	 */
	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
#else
	/* We can clobber r12 */
	PPC_FUNC_ADDR(12, func);
	EMIT(PPC_RAW_MTCTR(12));
#endif
	EMIT(PPC_RAW_BCTRL());
}
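
/*
 * For reference (illustrative sketch, not a definition this file uses):
 * an ELFv1 function descriptor is three doublewords, which is what the
 * offset-0 and offset-8 loads above are walking:
 *
 *	struct func_desc_sketch {
 *		u64 entry;	- code address, loaded into CTR
 *		u64 toc;	- TOC pointer, loaded into r2
 *		u64 env;	- environment pointer, unused here
 *	};
 */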
void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	/* Load function address into r12 */
	PPC_LI64(12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to change,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
	for (i = ctx->idx - ctx_idx; i < 5; i++)
		EMIT(PPC_RAW_NOP());

#ifdef PPC64_ELF_ABI_v1
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourselves.
	 */
	PPC_BPF_LL(2, 12, 8);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(12, 12, 0);
#endif

	EMIT(PPC_RAW_MTCTR(12));
	EMIT(PPC_RAW_BCTRL());
}
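
/*
 * Illustrative worst case for the padding above: loading an arbitrary
 * 64-bit address such as 0x1234567890abcdef takes the full five
 * instructions, roughly:
 *
 *	lis	r12, 0x1234
 *	ori	r12, r12, 0x5678
 *	sldi	r12, r12, 32
 *	oris	r12, r12, 0x90ab
 *	ori	r12, r12, 0xcdef
 *
 * whereas a small constant may need just one or two, hence the nops.
 */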
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = b2p[BPF_REG_2];
	int b2p_index = b2p[BPF_REG_3];

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
	EMIT(PPC_RAW_CMPLW(b2p_index, b2p[TMP_REG_1]));
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
	EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT));
	PPC_BCC(COND_GT, out);

	/*
	 * tail_call_cnt++;
	 */
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1));
	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_MULI(b2p[TMP_REG_1], b2p_index, 8));
	EMIT(PPC_RAW_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array));
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLDI(b2p[TMP_REG_1], 0));
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
	/* skip past the function descriptor */
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE));
#else
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE));
#endif
	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}
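
/*
 * Taken together, the sequence above implements (roughly, in C):
 *
 *	if (index >= array->map.max_entries ||
 *	    tail_call_cnt > MAX_TAIL_CALL_CNT)
 *		goto out;
 *	tail_call_cnt++;
 *	prog = array->ptrs[index];
 *	if (!prog)
 *		goto out;
 *	goto *(prog->bpf_func + BPF_TAILCALL_PROLOGUE_SIZE);
 *
 * where "out" falls through to the instruction after the tail call.
 */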
/*
 * We always spill into the redzone, even if the bpf program has its own stackframe.
 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 */
void bpf_stf_barrier(void);

asm (
"		.global bpf_stf_barrier		;"
"	bpf_stf_barrier:			;"
"		std	21,-64(1)		;"
"		std	22,-56(1)		;"
"		sync				;"
"		ld	21,-64(1)		;"
"		ld	22,-56(1)		;"
"		ori	31,31,0			;"
"		.rept 14			;"
"		b	1f			;"
"	1:					;"
"		.endr				;"
"		blr				;"
);
/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
		       u32 *addrs, bool extra_pass)
{
	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];
	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u64 imm64;
		u32 true_cond;
		u32 tmp_idx;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			} else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm > -32768 && imm <= 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
			} else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
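		/*
		 * Example (illustrative): for imm = 16 the ADD_K case above
		 * folds the constant straight into a single "addi", while
		 * imm = 0x12345678 goes through PPC_LI32(), roughly
		 * "lis tmp, 0x1234; ori tmp, tmp, 0x5678", followed by a
		 * register-register add.
		 */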
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			else
				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
					EMIT(PPC_RAW_MULW(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				else
					EMIT(PPC_RAW_MULD(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg));
				EMIT(PPC_RAW_MULW(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else
				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
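		/*
		 * Note: BPF_MOD is open-coded as dst - (dst / src) * src
		 * via divwu/mullw here (and divdu/mulld below), so no
		 * modulo instruction is required from the ISA baseline we
		 * target.
		 */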
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg));
				EMIT(PPC_RAW_MULD(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else
				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;

			if (imm == 1) {
				if (BPF_OP(code) == BPF_DIV) {
					goto bpf_alu32_trunc;
				} else {
					EMIT(PPC_RAW_LI(dst_reg, 0));
					break;
				}
			}

			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_2],
							dst_reg,
							b2p[TMP_REG_1]));
					EMIT(PPC_RAW_MULW(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				} else
					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_2],
							dst_reg,
							b2p[TMP_REG_1]));
					EMIT(PPC_RAW_MULD(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				} else
					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
			goto bpf_alu32_trunc;
		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_AND(dst_reg, dst_reg, b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
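		/*
		 * Example (illustrative): a positive imm = 0x00050003 takes
		 * the else branch above and emits both halves, "ori dst,
		 * dst, 3" then "oris dst, dst, 5", while imm = 3 emits only
		 * the ori.
		 */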
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			/* skip zero extension move, but set address map. */
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
			break;
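		/*
		 * Note: the 32-bit ARSH cases go through bpf_alu32_trunc
		 * because sraw sign-extends into the top 32 bits, whereas
		 * slw/srw already leave them clear, so those cases can
		 * simply break.
		 */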
		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (imm == 1) {
				/* special mov32 for zext */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
				break;
			}
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			else if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
		break;
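		/*
		 * The rlwinm above rotates by zero and masks in word bits
		 * 0-31, which clears the upper 32 bits of the 64-bit
		 * register -- effectively dst &= 0xffffffff, the truncation
		 * the 32-bit BPF_ALU semantics require.
		 */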
		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23));
				/* Rotate 8 bits right & insert LSB to reg */
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31));
				/* Move result back to dst_reg */
				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
				break;
			case 64:
				/*
				 * Way easier and faster(?) to store the value
				 * into the stack and then use ldbrx.
				 *
				 * ctx->seen will be reliable in pass2, but
				 * the instructions generated will remain the
				 * same across all passes
				 */
				PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
				EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
				EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
				break;
			}
			break;
emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
				if (insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
				break;
			case 32:
				if (!fp->aux->verifier_zext)
					/* zero-extend 32 bits into 64 bits */
					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
				break;
			case 64:
				/* nop */
				break;
			}
			break;
		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
				break;

			switch (stf_barrier) {
			case STF_BARRIER_EIEIO:
				EMIT(PPC_RAW_EIEIO() | 0x02000000);
				break;
			case STF_BARRIER_SYNC_ORI:
				EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
				break;
			case STF_BARRIER_FALLBACK:
				EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
				PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
				EMIT(PPC_RAW_MTCTR(12));
				EMIT(PPC_RAW_BCTRL());
				EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
				break;
			case STF_BARRIER_NONE:
				break;
			}
			break;
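		/*
		 * Summary: depending on the mitigation selected via
		 * stf_barrier_type_get(), the barrier above is a special
		 * eieio form, a sync plus a load from the PACA (r13) and an
		 * ori, or a call out to the bpf_stf_barrier() fallback
		 * defined earlier in this file.
		 */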
		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
				src_reg = b2p[TMP_REG_1];
			}
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
				src_reg = b2p[TMP_REG_1];
			}
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_BPF_STL(src_reg, dst_reg, off);
			break;
		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
			if (imm != BPF_ADD) {
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -ENOTSUPP;
			}

			/* *(u32 *)(dst + off) += src */

			/* Get EA into TMP_REG_1 */
			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			EMIT(PPC_RAW_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
			/* add value from src_reg into this */
			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
			/* store result back */
			EMIT(PPC_RAW_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
			/* we're done if this succeeded, otherwise retry from the lwarx */
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			if (imm != BPF_ADD) {
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -ENOTSUPP;
			}

			/* *(u64 *)(dst + off) += src */

			/* Get EA into TMP_REG_1 */
			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
			/* add value from src_reg into this */
			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
			/* store result back */
			EMIT(PPC_RAW_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
			/* we're done if this succeeded, otherwise retry from the ldarx */
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
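		/*
		 * Both sequences above are standard larx/stcx. reservation
		 * loops: if another CPU writes the reservation granule
		 * between the load and the store-conditional, stwcx./stdcx.
		 * fails (CR0[EQ] clear) and the conditional branch emitted
		 * by PPC_BCC_SHORT loops back to tmp_idx to retry.
		 */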
		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
			EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
			EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
			PPC_BPF_LL(dst_reg, src_reg, off);
			break;
		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				(((u64)(u32) insn[i+1].imm) << 32);
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			PPC_LI64(dst_reg, imm64);
			break;
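		/*
		 * Example (illustrative): insn[i].imm = 0x11223344 and
		 * insn[i+1].imm = 0x55667788 combine into
		 * imm64 = 0x5566778811223344 -- the first instruction
		 * carries the low 32 bits, the second the high 32 bits.
		 */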
		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;
		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (func_addr_fixed)
				bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
			else
				bpf_jit_emit_func_call_rel(image, ctx, func_addr);
			/* move return value from r3 to BPF_REG_0 */
			EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3));
			break;
		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
			case BPF_JMP32 | BPF_JSET | BPF_X:
				if (BPF_CLASS(code) == BPF_JMP) {
					EMIT(PPC_RAW_AND_DOT(b2p[TMP_REG_1], dst_reg,
						    src_reg));
				} else {
					int tmp_reg = b2p[TMP_REG_1];

					EMIT(PPC_RAW_AND(tmp_reg, dst_reg, src_reg));
					EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0,
						       31));
				}
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLW(dst_reg,
								   b2p[TMP_REG_1]));
					else
						EMIT(PPC_RAW_CMPLD(dst_reg,
								   b2p[TMP_REG_1]));
				}
				break;
			}
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
				} else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					if (is_jmp32)
						EMIT(PPC_RAW_CMPW(dst_reg,
								  b2p[TMP_REG_1]));
					else
						EMIT(PPC_RAW_CMPD(dst_reg,
								  b2p[TMP_REG_1]));
				}
				break;
			}
			case BPF_JMP | BPF_JSET | BPF_K:
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(b2p[TMP_REG_1], dst_reg, imm));
				else {
					int tmp_reg = b2p[TMP_REG_1];

					PPC_LI32(tmp_reg, imm);
					if (BPF_CLASS(code) == BPF_JMP) {
						EMIT(PPC_RAW_AND_DOT(tmp_reg, dst_reg,
								     tmp_reg));
					} else {
						EMIT(PPC_RAW_AND(tmp_reg, dst_reg,
								 tmp_reg));
						EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg,
									0, 0, 31));
					}
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;
		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;
		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					   code, i);
			return -ENOTSUPP;
		}
	}
	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}