]> Git Repo - linux.git/blob - arch/powerpc/net/bpf_jit.h
smpboot: Mark idle_init() as __always_inlined to work around aggressive compiler...
[linux.git] / arch / powerpc / net / bpf_jit.h
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * bpf_jit.h: BPF JIT compiler for PPC
4  *
5  * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
6  *           2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
7  */
8 #ifndef _BPF_JIT_H
9 #define _BPF_JIT_H
10
11 #ifndef __ASSEMBLY__
12
13 #include <asm/types.h>
14 #include <asm/ppc-opcode.h>
15
/*
 * The big-endian ppc64 ELFv1 ABI prefixes every function with a 24-byte
 * function descriptor; the JIT must reserve room for one in front of the
 * generated code.  ELFv2 and ppc32 have no descriptors.
 */
#ifdef PPC64_ELF_ABI_v1
#define FUNCTION_DESCR_SIZE	24
#else
#define FUNCTION_DESCR_SIZE	0
#endif
21
/*
 * Emit one 32-bit instruction at index 'idx' and advance the index.
 * During sizing passes the image pointer is NULL, so only the index
 * moves; the final pass actually stores the instruction.
 */
#define PLANT_INSTR(d, idx, instr)                                            \
	do { if (d) { (d)[idx] = instr; } idx++; } while (0)
#define EMIT(instr)		PLANT_INSTR(image, ctx->idx, instr)
25
/*
 * Long jump; (unconditional 'branch').  'dest' is a byte offset from the
 * start of the JIT image; the emitted displacement is relative to the
 * current instruction (ctx->idx * 4).
 */
#define PPC_JMP(dest)		EMIT(PPC_INST_BRANCH |                        \
				     (((dest) - (ctx->idx * 4)) & 0x03fffffc))
/* bl; (unconditional 'branch' with link) to absolute address */
#define PPC_BL_ABS(dest)	EMIT(PPC_INST_BL |                            \
				     (((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc))
/*
 * Conditional branch with the short (16-bit) displacement form.
 * "cond" here covers BO:BI fields.
 */
#define PPC_BCC_SHORT(cond, dest)	EMIT(PPC_INST_BRANCH_COND |           \
					     (((cond) & 0x3ff) << 16) |       \
					     (((dest) - (ctx->idx * 4)) &     \
					      0xfffc))
/*
 * Sign-extended 32-bit immediate load.
 * A value in [-32768, 32768) fits a single sign-extending 'li';
 * anything else takes 'lis' for the upper half plus an 'ori' for a
 * non-zero lower half.
 */
#define PPC_LI32(d, i)		do {                                          \
		if ((int)(uintptr_t)(i) >= -32768 &&                          \
				(int)(uintptr_t)(i) < 32768)                  \
			EMIT(PPC_RAW_LI(d, i));                               \
		else {                                                        \
			EMIT(PPC_RAW_LIS(d, IMM_H(i)));                       \
			if (IMM_L(i))                                         \
				EMIT(PPC_RAW_ORI(d, d, IMM_L(i)));            \
		} } while(0)
47
#ifdef CONFIG_PPC32
/*
 * Load 'r' with the sign extension of the 32-bit value 'i' (-1 if
 * negative, 0 otherwise) -- ppc32 only; presumably used to fill the
 * upper half of a 64-bit BPF value (confirm against callers).
 */
#define PPC_EX32(r, i)		EMIT(PPC_RAW_LI((r), (i) < 0 ? -1 : 0))
#endif
51
/*
 * 64-bit immediate load.
 * Values that sign-extend from 32 bits are delegated to PPC_LI32.
 * Otherwise the constant is built 16 bits at a time: load bits 63:32
 * (a single 'li' of bits 47:32 suffices when bits 63:47 are all clear,
 * since li's sign extension then yields zeroes; else 'lis' + optional
 * 'ori'), shift left 32, then OR in bits 31:16 ('oris') and bits 15:0
 * ('ori') when non-zero.
 */
#define PPC_LI64(d, i)		do {                                          \
		if ((long)(i) >= -2147483648 &&                               \
				(long)(i) < 2147483648)                       \
			PPC_LI32(d, i);                                       \
		else {                                                        \
			if (!((uintptr_t)(i) & 0xffff800000000000ULL))        \
				EMIT(PPC_RAW_LI(d, ((uintptr_t)(i) >> 32) &   \
						0xffff));                     \
			else {                                                \
				EMIT(PPC_RAW_LIS(d, ((uintptr_t)(i) >> 48))); \
				if ((uintptr_t)(i) & 0x0000ffff00000000ULL)   \
					EMIT(PPC_RAW_ORI(d, d,                \
					  ((uintptr_t)(i) >> 32) & 0xffff));  \
			}                                                     \
			EMIT(PPC_RAW_SLDI(d, d, 32));                         \
			if ((uintptr_t)(i) & 0x00000000ffff0000ULL)           \
				EMIT(PPC_RAW_ORIS(d, d,                       \
					 ((uintptr_t)(i) >> 16) & 0xffff));   \
			if ((uintptr_t)(i) & 0x000000000000ffffULL)           \
				EMIT(PPC_RAW_ORI(d, d, (uintptr_t)(i) &       \
							0xffff));             \
		} } while (0)
74
/*
 * Load a function's address into 'd': a full 64-bit constant on ppc64,
 * a 32-bit one on ppc32.
 */
#ifdef CONFIG_PPC64
#define PPC_FUNC_ADDR(d,i) do { PPC_LI64(d, i); } while(0)
#else
#define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
#endif
80
/*
 * A conditional branch's 16-bit signed displacement reaches +/-32KB;
 * report whether 'offset' is within that range.
 */
static inline bool is_nearbranch(int offset)
{
	return offset >= -32768 && offset < 32768;
}
85
/*
 * The fly in the ointment of code size changing from pass to pass is
 * avoided by padding the short branch case with a NOP.  If code size differs
 * with different branch reaches we will have the issue of code moving from
 * one pass to the next and will need a few passes to converge on a stable
 * state.
 *
 * An out-of-range destination is handled by inverting the condition
 * (flipping BO[3] via COND_CMP_TRUE) to branch over a following
 * unconditional long jump to the real target -- two instructions
 * either way.
 */
#define PPC_BCC(cond, dest)	do {                                          \
		if (is_nearbranch((dest) - (ctx->idx * 4))) {                 \
			PPC_BCC_SHORT(cond, dest);                            \
			EMIT(PPC_RAW_NOP());                                  \
		} else {                                                      \
			/* Flip the 'T or F' bit to invert comparison */      \
			PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4);  \
			PPC_JMP(dest);                                        \
		} } while(0)
102
/* To create a branch condition, select a bit of cr0... */
#define CR0_LT		0
#define CR0_GT		1
#define CR0_EQ		2
/* ...and modify BO[3] (branch if the bit is true vs. false) */
#define COND_CMP_TRUE	0x100
#define COND_CMP_FALSE	0x000
/*
 * Together, they make all required comparisons; these values feed the
 * "cond" argument of PPC_BCC/PPC_BCC_SHORT:
 */
#define COND_GT		(CR0_GT | COND_CMP_TRUE)
#define COND_GE		(CR0_LT | COND_CMP_FALSE)
#define COND_EQ		(CR0_EQ | COND_CMP_TRUE)
#define COND_NE		(CR0_EQ | COND_CMP_FALSE)
#define COND_LT		(CR0_LT | COND_CMP_TRUE)
#define COND_LE		(CR0_GT | COND_CMP_FALSE)
117
/* Flags kept in the top bits of codegen_context::seen: */
#define SEEN_FUNC	0x20000000 /* might call external helpers */
#define SEEN_STACK	0x40000000 /* uses BPF stack */
#define SEEN_TAILCALL	0x80000000 /* uses tail calls */

/*
 * Register-usage bits: GPR 'i' lives in bit (31 - i) (see
 * bpf_is_seen_register() below), so r3..r12 occupy bits 28..19 and
 * r14..r31 occupy bits 17..0.
 */
#define SEEN_VREG_MASK	0x1ff80000 /* Volatile registers r3-r12 */
#define SEEN_NVREG_MASK	0x0003ffff /* Non volatile registers r14-r31 */
124
/*
 * b2p[]: map from BPF register numbers to ppc GPRs; defined by the
 * subarch JIT.  ppc64 carries one extra slot past MAX_BPF_JIT_REG --
 * presumably an additional scratch register; confirm in the 64-bit JIT.
 */
#ifdef CONFIG_PPC64
extern const int b2p[MAX_BPF_JIT_REG + 2];
#else
extern const int b2p[MAX_BPF_JIT_REG + 1];
#endif
130
/* Per-program state carried across the JIT's code-generation passes. */
struct codegen_context {
	/*
	 * This is used to track register usage as well
	 * as calls to external helpers.
	 * - register usage is tracked with corresponding
	 *   bits (r3-r31)
	 * - rest of the bits can be used to track other
	 *   things -- for now, we use bits 0 to 2
	 *   encoded in SEEN_* macros above
	 */
	unsigned int seen;
	unsigned int idx;		/* index of the next instruction to emit (see EMIT) */
	unsigned int stack_size;	/* BPF stack area size -- presumably bytes; confirm in prologue code */
	int b2p[ARRAY_SIZE(b2p)];	/* per-program copy of the BPF->GPR map */
};
146
/*
 * Make freshly written JIT code visible to instruction fetch: order the
 * image stores before the flush, then invalidate the icache over
 * [start, end).
 */
static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();	/* smp write barrier */
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
152
153 static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
154 {
155         return ctx->seen & (1 << (31 - i));
156 }
157
158 static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
159 {
160         ctx->seen |= 1 << (31 - i);
161 }
162
163 static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i)
164 {
165         ctx->seen &= ~(1 << (31 - i));
166 }
167
/*
 * Code-generation entry points shared with the JIT compiler proper
 * (defined elsewhere -- presumably the subarch bpf_jit_comp*.c files).
 */
void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func);
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
		       u32 *addrs, bool extra_pass);
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx);
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx);
void bpf_jit_realloc_regs(struct codegen_context *ctx);
174
175 #endif
176
177 #endif
This page took 0.04433 seconds and 4 git commands to generate.