/* bpf_jit_comp.c: BPF JIT compiler
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit.h"
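
/*
 * Runtime switch for the JIT (net.core.bpf_jit_enable sysctl):
 * 0 disables it, 1 enables it, and values above 1 additionally dump
 * the generated image via bpf_jit_dump() below.
 */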
int bpf_jit_enable __read_mostly;

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
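
/*
 * The prologue and epilogue are generated to match what the filter actually
 * uses (ctx->seen): a stack frame is built only when M[] scratch slots or the
 * skb-data load helpers are needed.  Register roles (r_A, r_X, r_D, r_HL,
 * r_M, r_skb and the scratch registers) are defined in bpf_jit.h.
 */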
static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);

			/* Back up non-volatile regs. */
			PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
			}
		}
		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
	}

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 *  r_HL = skb->len - skb->data_len
		 *  r_D  = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	switch (filter[0].code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		/* first instruction sets A register (or is RET 'constant') */
		break;
	default:
		/* make sure we don't leak kernel information to the user */
		PPC_LI(r_A, 0);
	}
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
			PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */

	PPC_BLR();
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
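
/*
 * The helper variant is picked from the sign of the constant offset K:
 * K >= 0 uses the fast positive-offset loader, negative K down to SKF_LL_OFF
 * uses the negative-offset loader, and anything below SKF_LL_OFF falls back
 * to the generic helper.
 */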

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];
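
	/*
	 * addrs[] has flen + 1 entries; the final slot (set at the bottom of
	 * this function) holds the offset of the epilogue, so exit_addr is
	 * the branch target for early returns and load-helper errors.
	 */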

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;
		u16 code = bpf_anc_helper(&filter[i]);

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (code) {
			/*** ALU ops ***/
		case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MUL(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MUL(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
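			/*
			 * Classic BPF defines division by zero as "return 0"
			 * rather than a fault: branch to an existing 'ret 0'
			 * instruction if the filter has one (pc_ret0),
			 * otherwise emit a two-instruction return-0 stub and
			 * hop over it when the divisor is non-zero.
			 */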
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
				PPC_DIVWU(r_scratch1, r_A, r_X);
				PPC_MUL(r_scratch1, r_X, r_scratch1);
				PPC_SUB(r_A, r_A, r_scratch1);
			} else {
				PPC_DIVWU(r_A, r_A, r_X);
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
			PPC_LI32(r_scratch2, K);
			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
			PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
			if (K == 1)
				break;
			PPC_LI32(r_scratch1, K);
			PPC_DIVWU(r_A, r_A, r_scratch1);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			PPC_XOR(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
			if (IMM_L(K))
				PPC_XORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_XORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
		case BPF_ALU | BPF_NEG:
			PPC_NEG(r_A, r_A);
			break;
		case BPF_RET | BPF_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we have anything to clean up.
			 * Otherwise, if there's nothing to tidy, just return.
			 * If we /are/ the last instruction, we're about to
			 * fall through to the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2.  Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_RET | BPF_A:
			PPC_MR(r_ret, r_A);
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_MISC | BPF_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_MISC | BPF_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;
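			/*
			 * The classic BPF scratch space M[0..15] is kept in
			 * the non-volatile registers starting at r_M.  Each
			 * slot used below sets its own bit in ctx->seen (in
			 * addition to SEEN_MEM), so the prologue/epilogue can
			 * save and restore only the registers this filter
			 * actually touches.
			 */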

			/*** Constant loads/M[] access ***/
		case BPF_LD | BPF_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_LDX | BPF_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_LD | BPF_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LDX | BPF_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

			/*** Ancillary info loads ***/
		case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							    protocol));
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
		case BPF_ANC | SKF_AD_HATYPE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						ifindex) != 4);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						type) != 2);
			PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				PPC_LWZ_OFFS(r_A, r_scratch1,
					     offsetof(struct net_device, ifindex));
			} else {
				PPC_LHZ_OFFS(r_A, r_scratch1,
					     offsetof(struct net_device, type));
			}
			break;
		case BPF_ANC | SKF_AD_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  hash));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
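
			/*
			 * VLAN_TAG_PRESENT is bit 12 of vlan_tci (the
			 * BUILD_BUG_ON above pins that down): SKF_AD_VLAN_TAG
			 * masks the flag out of the TCI value, while
			 * SKF_AD_VLAN_TAG_PRESENT isolates it and shifts it
			 * down to 0 or 1.
			 */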
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
				PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
			} else {
				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
				PPC_SRWI(r_A, r_A, 12);
			}
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
			PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
			PPC_SRWI(r_A, r_A, 5);
			break;
		case BPF_ANC | SKF_AD_CPU:
			PPC_BPF_LOAD_CPU(r_A);
			break;
			/*** Absolute loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_LD | BPF_H | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_B | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
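			/*
			 * Common tail for the absolute loads: the chosen
			 * helper is called via the link register (mtlr/blrl)
			 * with the packet offset in r_addr, and on success
			 * returns the loaded value in r_A.
			 */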
		common_load:
			ctx->seen |= SEEN_DATAREF;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

			/*** Indirect loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K].  Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_FUNC_ADDR(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;
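
			/*
			 * Classic BPF's LDX+MSH instruction computes
			 * X = 4 * (P[K] & 0xf), i.e. the IP header length in
			 * bytes; the masking and shifting happens inside the
			 * sk_load_byte_msh helper, so it can share the
			 * common_load tail above.
			 */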
		case BPF_LDX | BPF_B | BPF_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;
			break;

			/*** Jump and branches ***/
		case BPF_JMP | BPF_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
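			/*
			 * All conditional jumps funnel through here with
			 * true_cond set to the CR condition that selects the
			 * "jt" target; the compare (or and.) emitted below
			 * sets cr0 for it.
			 */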
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;
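
	/*
	 * flen + 1 slots: the extra entry records where the body ends, which
	 * bpf_jit_build_body() reads back as exit_addr (the epilogue offset).
	 */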
	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes.  Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction.  Not feasible, so long jumps are
	 * used, distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in).  Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used.  Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter.  On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code.  On subsequent passes, branches will be
	 * generated short or long and code size will reduce.  With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real.  Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long.  The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen.  This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
	image = module_alloc(alloclen);
	if (!image)
		goto out;

	code_base = image + (FUNCTION_DESCR_SIZE/4);
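
	/*
	 * FUNCTION_DESCR_SIZE bytes are reserved in front of the code for the
	 * ppc64 function descriptor (entry address + TOC) filled in below;
	 * code_base points at the first real instruction.
	 */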

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/* Note that we dump code_base rather than image, since the
		 * opcodes live at code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

	if (image) {
		bpf_flush_icache(code_base, code_base + (proglen/4));
#ifdef CONFIG_PPC64
		/* Function descriptor nastiness: Address + TOC */
		((u64 *)image)[0] = (u64)code_base;
		((u64 *)image)[1] = local_paca->kernel_toc;
#endif
		fp->bpf_func = (void *)image;
		fp->jited = 1;
	}

out:
	kfree(addrs);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}