// SPDX-License-Identifier: GPL-2.0-only
/* bpf_jit_comp.c: BPF JIT compiler */

#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit32.h"
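/*
 * PowerPC has split instruction and data caches, so JITed code written
 * through the data side must be made visible to instruction fetch before it
 * runs: flush_icache_range() below pushes the dirty D-cache lines out and
 * invalidates the corresponding I-cache lines over the given range.
 */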
static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
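/*
 * Register conventions (see bpf_jit32.h for the authoritative list): r3
 * carries the skb pointer on entry and the return value on exit (r_skb /
 * r_ret), r_A and r_X hold the classic BPF accumulator and index register,
 * r_D and r_HL cache skb->data and the linear header length, r_M..r_M+15
 * back the 16-word M[] scratch store in non-volatile GPRs, and r_scratch1 /
 * r_scratch2 are temporaries.
 */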
static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);

			/* Back up non-volatile regs. */
			PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
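		/*
		 * Save-slot arithmetic: non-volatile GPR i goes at
		 * -(REG_SZ * (32 - i)) below the stack pointer on entry
		 * (the top of the new frame), so r31 sits closest to the
		 * top and lower-numbered registers further down.  The
		 * epilogue restores with the same offsets once the frame
		 * has been popped again.
		 */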
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
			}
		}
		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
	}
	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 *  r_HL = skb->len - skb->data_len
		 *  r_D  = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}
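	/*
	 * r_HL now holds skb_headlen(skb), i.e. the bytes available in the
	 * linear area.  The out-of-line sk_load_* helpers check offsets
	 * against it and take a slow path (ultimately skb_copy_bits()) for
	 * data that lives in fragments.
	 */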
	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}
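	/*
	 * bpf_needs_clear_a() is conservative: it reports true unless the
	 * filter's first instruction is known to overwrite A without reading
	 * it (e.g. a RET #K or a length load), so stale register contents
	 * can never be observed by the filter.
	 */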
	/* make sure we don't leak kernel information to user */
	if (bpf_needs_clear_a(&filter[0]))
		PPC_LI(r_A, 0);
}
static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
			PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */
	PPC_BLR();
}
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
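/*
 * CHOOSE_LOAD_FUNC classifies the constant offset K at compile time:
 * non-negative offsets use the fast func##_positive_offset helper, offsets
 * in [SKF_LL_OFF, 0) (the window used for link-layer / network-header
 * relative loads) use the func##_negative_offset variant, and anything more
 * negative falls back to the generic helper, which revalidates the offset
 * at run time.
 */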
/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];
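	/*
	 * addrs[] has flen + 1 entries: addrs[i] is the byte offset of
	 * instruction i within the generated body, and addrs[flen] is the
	 * offset of the epilogue, used as the target for early returns and
	 * error exits.
	 */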
	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;
		u16 code = bpf_anc_helper(&filter[i]);
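		/*
		 * bpf_anc_helper() folds loads of ancillary data (BPF_LD |
		 * BPF_ABS with K in the SKF_AD_OFF range) into BPF_ANC |
		 * SKF_AD_* pseudo-opcodes so they get dedicated cases below.
		 */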
		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (code) {
			/*** ALU ops ***/
		case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
			PPC_ADDI(r_A, r_A, IMM_L(K));
			PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MULW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MULW(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
				PPC_DIVWU(r_scratch1, r_A, r_X);
				PPC_MULW(r_scratch1, r_X, r_scratch1);
				PPC_SUB(r_A, r_A, r_scratch1);
			} else {
				PPC_DIVWU(r_A, r_A, r_X);
			}
			break;
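		/*
		 * Classic BPF defines division (and modulus) by zero as
		 * terminating the filter with a return value of 0, which is
		 * why X is compared with zero first: either branch to an
		 * existing "ret 0" or emit a "li r_ret, 0; b <epilogue>"
		 * pair that the short branch above skips over (its target,
		 * ctx->idx*4 + 12, is three instructions past the bcc).
		 */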
		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
			PPC_LI32(r_scratch2, K);
			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
			PPC_MULW(r_scratch1, r_scratch2, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
			PPC_LI32(r_scratch1, K);
			PPC_DIVWU(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			PPC_ORI(r_A, r_A, IMM_L(K));
			PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			PPC_XOR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
			PPC_XORI(r_A, r_A, IMM_L(K));
			PPC_XORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
			PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_RET | BPF_K:
			PPC_LI32(r_ret, K);
			if (!K && ctx->pc_ret0 == -1)
				ctx->pc_ret0 = i;
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we've stuff to clean up. Otherwise,
			 * if there's nothing to tidy, just return. If we /are/
			 * the last instruction, we're about to fall through to
			 * the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2. Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_RET | BPF_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_MISC | BPF_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_MISC | BPF_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

			/*** Constant loads/M[] access ***/
		case BPF_LD | BPF_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_LDX | BPF_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_LD | BPF_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LDX | BPF_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
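		/*
		 * The classic BPF scratch store M[] never touches memory
		 * here: slot K (0..15) lives in GPR r_M + K, and bit K of
		 * ctx->seen records which slots are used so the prologue and
		 * epilogue only save/restore those registers.
		 */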
		case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
			PPC_LWZ_OFFS(r_A, r_skb, K);
			break;
		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;
			/*** Ancillary info loads ***/
		case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							    protocol));
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
		case BPF_ANC | SKF_AD_HATYPE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  type) != 2);
			PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				PPC_LWZ_OFFS(r_A, r_scratch1,
					     offsetof(struct net_device, ifindex));
			} else {
				PPC_LHZ_OFFS(r_A, r_scratch1,
					     offsetof(struct net_device, type));
			}
			break;
		case BPF_ANC | SKF_AD_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  hash));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);

			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			PPC_LBZ_OFFS(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET());
			if (PKT_VLAN_PRESENT_BIT)
				PPC_SRWI(r_A, r_A, PKT_VLAN_PRESENT_BIT);
			if (PKT_VLAN_PRESENT_BIT < 7)
				PPC_ANDI(r_A, r_A, 1);
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
			PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
			PPC_SRWI(r_A, r_A, 5);
			break;
		case BPF_ANC | SKF_AD_CPU:
			PPC_BPF_LOAD_CPU(r_A);
			break;
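		/*
		 * The sk_load_* helpers are hand-written assembly
		 * (bpf_jit_asm.S).  Roughly: they expect the skb pointer, the
		 * cached skb->data (r_D) and header length (r_HL) to be live,
		 * take the offset in r_addr, return the loaded value in r_A,
		 * and signal failure by setting the LT bit in cr0 with a
		 * return value already placed in r3 -- see bpf_jit_asm.S for
		 * the exact contract before relying on details here.
		 */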
			/*** Absolute loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_LD | BPF_H | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_B | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;
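		/*
		 * Illustrative only: for "ldh [12]" (fetch the EtherType at
		 * offset 12), the sequence above loads the address of
		 * sk_load_half_positive_offset into r_scratch1, moves it to
		 * LR, puts 12 in r_addr, calls the helper with blrl, and
		 * branches to the epilogue if cr0 says the load failed.
		 */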
			/*** Indirect loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K]. Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;
		case BPF_LDX | BPF_B | BPF_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;
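		/*
		 * BPF_LDX | BPF_B | BPF_MSH is the classic "X = 4 * (P[K] &
		 * 0xf)" instruction: the helper loads the byte at offset K,
		 * keeps the low nibble and multiplies it by four, which is
		 * how filters pull a variable IP header length into X.
		 */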
			/*** Jump and branches ***/
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + K]);
			break;
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
				PPC_LI32(r_scratch1, K);
				PPC_CMPLW(r_A, r_scratch1);
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
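		/*
		 * Illustrative example: for "jeq #ETH_P_IP, jt=0, jf=5" the
		 * true path is the fallthrough, so only one branch is
		 * emitted: xor-ing with COND_CMP_TRUE flips the branch sense,
		 * giving a "bne" to addrs[i + 1 + 5] while the true case
		 * simply falls through to the next instruction.
		 */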
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}
	}
	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}
void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;
	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes. Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction. Not feasible, so long jumps are
	 * used, distinct from short branches.
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in). Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used. Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * If we optimise this, near branches will be shorter. On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code. On subsequent passes, branches will be
	 * generated short or long and code size will reduce. With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real. Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long. The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */
	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;
	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);
	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
	image = module_alloc(alloclen);
	if (!image)
		goto out;
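	/*
	 * module_alloc() hands back module-space memory that can be mapped
	 * executable; if the allocation fails the filter simply keeps running
	 * in the interpreter.  FUNCTION_DESCR_SIZE is non-zero only on ABIs
	 * that use function descriptors (ppc64 ELFv1), where the descriptor
	 * filled in below precedes the actual instructions.
	 */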
	code_base = image + (FUNCTION_DESCR_SIZE/4);
	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}
	if (bpf_jit_enable > 1)
		/* Note that we output the base address of the code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

	bpf_flush_icache(code_base, code_base + (proglen/4));
#ifdef CONFIG_PPC64
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;

out:
	kfree(addrs);
}
void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}