/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <[email protected]>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit64.h"

static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
	memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
}

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
	return (ctx->seen & (1 << (31 - b2p[i])));
}

static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
{
	ctx->seen |= (1 << (31 - b2p[i]));
}

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP.
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
}

/*
 * When not setting up our own stack frame, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		] 		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 6*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 8
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 16);
}

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 8;
}
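
/*
 * Worked example of the offsets above (illustrative; it assumes
 * BPF_PPC_STACK_SAVE == 6*8 == 48, per the "nv gpr save area" row in the
 * diagram). Without a stack frame of our own, locals live in the
 * caller-protected red zone below r1:
 *
 *	bpf_jit_stack_local(ctx)	== -(48 + 16) == -64	(local_tmp_var)
 *	bpf_jit_stack_tailcallcnt(ctx)	== -64 + 8    == -56	(tail_call_cnt)
 *
 * With a stack frame, the same two slots instead sit at
 * STACK_FRAME_MIN_SIZE + stack_size, just above the bpf program's own
 * stack area.
 */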

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}
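
/*
 * For instance (a sketch using this file's conventions): with a stack
 * frame and ctx->stack_size == 64, non-volatile r31 is saved at
 * (BPF_PPC_STACKFRAME + 64) - 8 * (32 - 31), i.e. 8 bytes below the top
 * of our frame, and r30 sits 8 bytes below that. Without a frame, the
 * same slots land at negative offsets in the red zone.
 */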

static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		PPC_LI(b2p[TMP_REG_1], 0);
		/* this goes in the redzone */
		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
	} else {
		PPC_NOP();
		PPC_NOP();
	}

#define BPF_TAILCALL_PROLOGUE_SIZE	8

	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions.
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
		}

		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10.
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame.
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, BPF_REG_FP))
		PPC_ADDI(b2p[BPF_REG_FP], 1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size);
}
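
/*
 * Illustrative prologue for a program that calls helpers and uses its
 * stack (a sketch: the exact b2p[] register numbers and the -56 red-zone
 * slot are assumptions based on the layout comments above):
 *
 *	li	r9, 0			# tail_call_cnt = 0
 *	std	r9, -56(r1)		#   ... stashed in the red zone
 *	mflr	r0
 *	std	r0, 16(r1)		# save LR at PPC_LR_STKOFF
 *	stdu	r1, -frame(r1)		# allocate our stack frame
 *	std	rN, off(r1)		# one store per seen NV reg
 *	addi	r31, r1, ...		# BPF frame pointer
 */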

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, i))
			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size);
		if (ctx->seen & SEEN_FUNC) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			PPC_MTLR(0);
		}
	}
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	PPC_MR(3, b2p[BPF_REG_0]);

	PPC_BLR();
}

static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
				       u64 func)
{
#ifdef PPC64_ELF_ABI_v1
	/* func points to the function descriptor */
	PPC_LI64(b2p[TMP_REG_2], func);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
	/* ... and move it to LR */
	PPC_MTLR(b2p[TMP_REG_1]);
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourselves.
	 */
	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
#else
	/* We can clobber r12 */
	PPC_FUNC_ADDR(12, func);
	PPC_MTLR(12);
#endif
	PPC_BLRL();
}
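
/*
 * For reference, an ELFv1 function descriptor is three doublewords, of
 * which the code above reads the first two (a sketch for illustration,
 * not a definition this file relies on):
 *
 *	struct func_desc {
 *		u64 entry;	// offset 0: address of the actual code
 *		u64 toc;	// offset 8: TOC (r2) value for the callee
 *		u64 env;	// offset 16: environment pointer (unused here)
 *	};
 */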

static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
				       u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	/* Load function address into r12 */
	PPC_LI64(12, func);

	/*
	 * For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but PPC_LI64() may emit fewer
	 * instructions depending on the value of the address.
	 *
	 * Since we don't want the number of instructions emitted to change,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
	for (i = ctx->idx - ctx_idx; i < 5; i++)
		PPC_NOP();

#ifdef PPC64_ELF_ABI_v1
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourselves.
	 */
	PPC_BPF_LL(2, 12, 8);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(12, 12, 0);
#endif

	PPC_MTLR(12);
	PPC_BLRL();
}
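
/*
 * The worst-case PPC_LI64() expansion that the NOP padding above leaves
 * room for -- five instructions for a full 64-bit constant (illustrative
 * sequence):
 *
 *	lis	r12, imm[63:48]		# top 16 bits
 *	ori	r12, r12, imm[47:32]
 *	rldicr	r12, r12, 32, 31	# shift the upper half into place
 *	oris	r12, r12, imm[31:16]
 *	ori	r12, r12, imm[15:0]
 */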

static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already set up parameters in r3, r4 and r5:
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = b2p[BPF_REG_2];
	int b2p_index = b2p[BPF_REG_3];

	/*
	 * if (index >= array->map.max_entries)
	 *	goto out;
	 */
	PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
	PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
	PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *	goto out;
	 */
	PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
	PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
	PPC_BCC(COND_GT, out);

	/* tail_call_cnt++; */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1);
	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));

	/* prog = array->ptrs[index]; */
	PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
	PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *	goto out;
	 */
	PPC_CMPLDI(b2p[TMP_REG_1], 0);
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
	/* skip past the function descriptor */
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE);
#else
	PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE);
#endif
	PPC_MTCTR(b2p[TMP_REG_1]);

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	PPC_BCTR();
	/* out: */
}
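
/*
 * Pulled together, the emitted sequence behaves like this pseudo-C
 * (a mirror of the inline comments above, not code that is compiled):
 *
 *	if (index >= array->map.max_entries)
 *		goto out;
 *	if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 *		goto out;
 *	tail_call_cnt++;
 *	prog = array->ptrs[index];
 *	if (prog == NULL)
 *		goto out;
 *	goto *(prog->bpf_func + prologue_size);	// does not return here
 * out:
 *	// fall through to the next BPF instruction
 */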

/* Assemble the body code between the prologue & epilogue */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      u32 *addrs, bool extra_pass)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - only valid from the 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u64 imm64;
		u32 true_cond;
		u32 tmp_idx;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even if they don't, we only lose some prologue/epilogue
		 * optimization; everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, insn[i].dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, insn[i].src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			PPC_ADD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			PPC_SUB(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (BPF_OP(code) == BPF_SUB)
				imm = -imm;
			if (imm) {
				if (imm >= -32768 && imm < 32768)
					PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
				}
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				PPC_MULW(dst_reg, dst_reg, src_reg);
			else
				PPC_MULD(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
					PPC_MULW(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				else
					PPC_MULD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULW(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVWU(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
				PPC_MULD(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]);
				PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else
				PPC_DIVD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			else if (imm == 1)
				goto bpf_alu32_trunc;

			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULW(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVWU(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					PPC_DIVD(b2p[TMP_REG_2], dst_reg,
							b2p[TMP_REG_1]);
					PPC_MULD(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]);
					PPC_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				} else
					PPC_DIVD(dst_reg, dst_reg,
							b2p[TMP_REG_1]);
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			PPC_NEG(dst_reg, dst_reg);
			goto bpf_alu32_trunc;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			PPC_AND(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			PPC_OR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			PPC_XOR(dst_reg, dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
			} else {
				if (IMM_L(imm))
					PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
				if (IMM_H(imm))
					PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			PPC_SLW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			PPC_SLD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			PPC_SLWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				PPC_SLDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			PPC_SRW(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			PPC_SRD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			PPC_SRWI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				PPC_SRDI(dst_reg, dst_reg, imm);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			PPC_SRAD(dst_reg, dst_reg, src_reg);
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				PPC_SRADI(dst_reg, dst_reg, imm);
			break;

		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			PPC_MR(dst_reg, src_reg);
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			break;

bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU)
			PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
		break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
				/* Rotate 8 bits right & insert LSB to reg */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
				/* Move result back to dst_reg */
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
				/* Rotate 24 bits and insert byte 1 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
				/* Rotate 24 bits and insert byte 3 */
				PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
				PPC_MR(dst_reg, b2p[TMP_REG_1]);
				break;
			case 64:
				/*
				 * Way easier and faster(?) to store the value
				 * into the stack and then use ldbrx.
				 *
				 * ctx->seen will be reliable in pass2, but
				 * the instructions generated will remain the
				 * same across all passes.
				 */
				PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
				PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
				PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
				break;
			}
			break;
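
		/*
		 * Worked example of the 16-bit swap above (illustrative),
		 * with dst = 0x1234:
		 *
		 *	rlwinm tmp, dst, 8, 16, 23	# rotl32(0x1234, 8)
		 *					#  & 0x0000ff00 = 0x3400
		 *	rlwimi tmp, dst, 24, 24, 31	# |= rotl32(0x1234, 24)
		 *					#  & 0x000000ff = 0x12
		 *
		 * leaving tmp = 0x3412, which is then moved back to dst_reg.
		 */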

emit_clear:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			PPC_RLDICL(dst_reg, dst_reg, 0, 48);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			PPC_RLDICL(dst_reg, dst_reg, 0, 32);
			break;
		case 64:
			/* nop */
			break;
		}
		break;
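
		/*
		 * rldicl with a shift of 0 is a pure mask operation: clearing
		 * the first 48 (or 32) bits zero-extends the low 16 (or 32)
		 * bits, e.g. "rldicl dst, dst, 0, 48" is dst &= 0xffff.
		 */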

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STB(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STH(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STW(src_reg, dst_reg, off);
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_STD(src_reg, dst_reg, off);
			break;

		/*
		 * BPF_STX XADD (atomic_add)
		 */
		/* *(u32 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Get EA into TMP_REG_1 */
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			/* add value from src_reg into this */
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			/* store result back */
			PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
		/* *(u64 *)(dst + off) += src */
		case BPF_STX | BPF_XADD | BPF_DW:
			PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
			tmp_idx = ctx->idx * 4;
			PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
			PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
			PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
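
		/*
		 * The lwarx/stwcx. (and ldarx/stdcx.) pairs above form the
		 * usual PowerPC load-reserve/store-conditional loop; roughly
		 * (pseudo-C, for illustration only):
		 *
		 *	do {
		 *		tmp = *ea;	// load with reservation
		 *		tmp += src;
		 *	} while (!store_conditional(ea, tmp));	// retry on loss
		 */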

		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			PPC_LBZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
			PPC_LHZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
			PPC_LWZ(dst_reg, src_reg, off);
			break;
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
			PPC_LD(dst_reg, src_reg, off);
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				(((u64)(u32) insn[i+1].imm) << 32);
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			PPC_LI64(dst_reg, imm64);
			break;
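
		/*
		 * For example, "r1 = 0x1122334455667788 ll" arrives as two
		 * struct bpf_insn slots: the first carries the low word
		 * (imm = 0x55667788), the second the high word
		 * (imm = 0x11223344), recombined into imm64 above.
		 */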

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (func_addr_fixed)
				bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
			else
				bpf_jit_emit_func_call_rel(image, ctx, func_addr);

			/* move return value from r3 to BPF_REG_0 */
			PPC_MR(b2p[BPF_REG_0], 3);
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				PPC_CMPLD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
				PPC_CMPD(dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768)
					PPC_CMPLDI(dst_reg, imm);
				else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768)
					PPC_CMPDI(dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
				else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
						    b2p[TMP_REG_1]);
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;
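
		/*
		 * Example of the immediate-range handling above: for
		 * "if r3 > 0x12345 goto +N", 0x12345 does not fit the 16-bit
		 * immediate field of cmpldi, so the constant is materialized
		 * in a temp register and compared with cmpld instead.
		 */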

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					   code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

struct powerpc64_jit_data {
	struct bpf_binary_header *header;
	u32 *addrs;
	u8 *image;
	u32 proglen;
	struct codegen_context ctx;
};

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
	u32 proglen;
	u32 alloclen;
	u8 *image = NULL;
	u32 *code_base;
	u32 *addrs;
	struct powerpc64_jit_data *jit_data;
	struct codegen_context cgctx;
	int pass;
	int flen;
	struct bpf_binary_header *bpf_hdr;
	struct bpf_prog *org_fp = fp;
	struct bpf_prog *tmp_fp;
	bool bpf_blinded = false;
	bool extra_pass = false;

	if (!fp->jit_requested)
		return org_fp;

	tmp_fp = bpf_jit_blind_constants(org_fp);
	if (IS_ERR(tmp_fp))
		return org_fp;

	if (tmp_fp != org_fp) {
		bpf_blinded = true;
		fp = tmp_fp;
	}

	jit_data = fp->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			fp = org_fp;
			goto out;
		}
		fp->aux->jit_data = jit_data;
	}

	flen = fp->len;
	addrs = jit_data->addrs;
	if (addrs) {
		cgctx = jit_data->ctx;
		image = jit_data->image;
		bpf_hdr = jit_data->header;
		proglen = jit_data->proglen;
		alloclen = proglen + FUNCTION_DESCR_SIZE;
		extra_pass = true;
		goto skip_init_ctx;
	}

	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL) {
		fp = org_fp;
		goto out_addrs;
	}

	memset(&cgctx, 0, sizeof(struct codegen_context));

	/* Make sure that the stack is quadword aligned. */
	cgctx.stack_size = round_up(fp->aux->stack_depth, 16);

	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
		/* We hit something illegal or unsupported. */
		fp = org_fp;
		goto out_addrs;
	}

	/*
	 * Pretend to build the prologue, given the features we've seen. This
	 * will update cgctx.idx as it pretends to output instructions, so we
	 * can then calculate the total size from idx.
	 */
	bpf_jit_build_prologue(0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;

	bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
				       bpf_jit_fill_ill_insns);
	if (!bpf_hdr) {
		fp = org_fp;
		goto out_addrs;
	}

skip_init_ctx:
	code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}
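
	/*
	 * Two real passes are needed because forward-branch targets (and the
	 * epilogue address) recorded in addrs[] only become valid once a
	 * full pass has laid out every instruction; pass 2 re-emits
	 * everything with the final offsets.
	 */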

	if (bpf_jit_enable > 1)
		/*
		 * Note that we output the base address of code_base rather
		 * than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

#ifdef PPC64_ELF_ABI_v1
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;
	fp->jited_len = alloclen;

	bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
	if (!fp->is_func || extra_pass) {
out_addrs:
		kfree(addrs);
		kfree(jit_data);
		fp->aux->jit_data = NULL;
	} else {
		jit_data->addrs = addrs;
		jit_data->ctx = cgctx;
		jit_data->proglen = proglen;
		jit_data->image = image;
		jit_data->header = bpf_hdr;
	}

out:
	if (bpf_blinded)
		bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

	return fp;
}

/* Overriding bpf_jit_free() as we don't set images read-only. */
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *bpf_hdr = (void *)addr;

	if (fp->jited)
		bpf_jit_binary_free(bpf_hdr);

	bpf_prog_unlock_free(fp);
}