[linux.git] / kernel / bpf / core.c
1 /*
2  * Linux Socket Filter - Kernel level socket filtering
3  *
4  * Based on the design of the Berkeley Packet Filter. The new
5  * internal format has been designed by PLUMgrid:
6  *
7  *      Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
8  *
9  * Authors:
10  *
11  *      Jay Schulist <[email protected]>
12  *      Alexei Starovoitov <[email protected]>
13  *      Daniel Borkmann <[email protected]>
14  *
15  * This program is free software; you can redistribute it and/or
16  * modify it under the terms of the GNU General Public License
17  * as published by the Free Software Foundation; either version
18  * 2 of the License, or (at your option) any later version.
19  *
20  * Andi Kleen - Fix a few bad bugs and races.
21  * Kris Katterjohn - Added many additional checks in bpf_check_classic()
22  */
23
24 #include <linux/filter.h>
25 #include <linux/skbuff.h>
26 #include <linux/vmalloc.h>
27 #include <linux/random.h>
28 #include <linux/moduleloader.h>
29 #include <linux/bpf.h>
30 #include <linux/frame.h>
31 #include <linux/rbtree_latch.h>
32 #include <linux/kallsyms.h>
33 #include <linux/rcupdate.h>
34
35 #include <asm/unaligned.h>
36
37 /* Registers */
38 #define BPF_R0  regs[BPF_REG_0]
39 #define BPF_R1  regs[BPF_REG_1]
40 #define BPF_R2  regs[BPF_REG_2]
41 #define BPF_R3  regs[BPF_REG_3]
42 #define BPF_R4  regs[BPF_REG_4]
43 #define BPF_R5  regs[BPF_REG_5]
44 #define BPF_R6  regs[BPF_REG_6]
45 #define BPF_R7  regs[BPF_REG_7]
46 #define BPF_R8  regs[BPF_REG_8]
47 #define BPF_R9  regs[BPF_REG_9]
48 #define BPF_R10 regs[BPF_REG_10]
49
50 /* Named registers */
51 #define DST     regs[insn->dst_reg]
52 #define SRC     regs[insn->src_reg]
53 #define FP      regs[BPF_REG_FP]
54 #define ARG1    regs[BPF_REG_ARG1]
55 #define CTX     regs[BPF_REG_CTX]
56 #define IMM     insn->imm
57
58 /* No hurry in this branch
59  *
60  * Exported for the bpf jit load helper.
61  */
62 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
63 {
64         u8 *ptr = NULL;
65
66         if (k >= SKF_NET_OFF)
67                 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
68         else if (k >= SKF_LL_OFF)
69                 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
70
71         if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
72                 return ptr;
73
74         return NULL;
75 }
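/* Example (illustrative, not part of the original file): a classic BPF
 * filter can reach the IP header via the negative-offset aliases that
 * end up in this helper, e.g. loading the IPv4 protocol byte no matter
 * how much link layer header the attach point has pulled:
 *
 *      struct sock_filter f[] = {
 *              BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9),
 *              BPF_STMT(BPF_RET | BPF_A, 0),
 *      };
 *
 * Offset 9 is offsetof(struct iphdr, protocol); SKF_NET_OFF makes the
 * load relative to skb_network_header() as implemented above.
 */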
76
77 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
78 {
79         gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
80         struct bpf_prog_aux *aux;
81         struct bpf_prog *fp;
82
83         size = round_up(size, PAGE_SIZE);
84         fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
85         if (fp == NULL)
86                 return NULL;
87
88         kmemcheck_annotate_bitfield(fp, meta);
89
90         aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
91         if (aux == NULL) {
92                 vfree(fp);
93                 return NULL;
94         }
95
96         fp->pages = size / PAGE_SIZE;
97         fp->aux = aux;
98         fp->aux->prog = fp;
99
100         INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
101
102         return fp;
103 }
104 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
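/* Usage sketch (hypothetical helper; the caller fills insnsi and len
 * itself, as the bpf() syscall path does): build a minimal
 * "r0 = 1; exit" program on top of bpf_prog_alloc().
 */
static struct bpf_prog * __maybe_unused bpf_example_build_prog(void)
{
        struct bpf_insn insns[] = {
                BPF_MOV64_IMM(BPF_REG_0, 1),    /* r0 = 1   */
                BPF_EXIT_INSN(),                /* return r0 */
        };
        struct bpf_prog *fp;

        fp = bpf_prog_alloc(bpf_prog_size(ARRAY_SIZE(insns)), 0);
        if (!fp)
                return NULL;

        fp->len = ARRAY_SIZE(insns);
        memcpy(fp->insnsi, insns, sizeof(insns));

        return fp;
}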
105
106 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
107                                   gfp_t gfp_extra_flags)
108 {
109         gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
110         struct bpf_prog *fp;
111         u32 pages, delta;
112         int ret;
113
114         BUG_ON(fp_old == NULL);
115
116         size = round_up(size, PAGE_SIZE);
117         pages = size / PAGE_SIZE;
118         if (pages <= fp_old->pages)
119                 return fp_old;
120
121         delta = pages - fp_old->pages;
122         ret = __bpf_prog_charge(fp_old->aux->user, delta);
123         if (ret)
124                 return NULL;
125
126         fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
127         if (fp == NULL) {
128                 __bpf_prog_uncharge(fp_old->aux->user, delta);
129         } else {
130                 kmemcheck_annotate_bitfield(fp, meta);
131
132                 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
133                 fp->pages = pages;
134                 fp->aux->prog = fp;
135
136                 /* We keep fp->aux from fp_old around in the new
137                  * reallocated structure.
138                  */
139                 fp_old->aux = NULL;
140                 __bpf_prog_free(fp_old);
141         }
142
143         return fp;
144 }
145
146 void __bpf_prog_free(struct bpf_prog *fp)
147 {
148         kfree(fp->aux);
149         vfree(fp);
150 }
151
152 int bpf_prog_calc_tag(struct bpf_prog *fp)
153 {
154         const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
155         u32 raw_size = bpf_prog_tag_scratch_size(fp);
156         u32 digest[SHA_DIGEST_WORDS];
157         u32 ws[SHA_WORKSPACE_WORDS];
158         u32 i, bsize, psize, blocks;
159         struct bpf_insn *dst;
160         bool was_ld_map;
161         u8 *raw, *todo;
162         __be32 *result;
163         __be64 *bits;
164
165         raw = vmalloc(raw_size);
166         if (!raw)
167                 return -ENOMEM;
168
169         sha_init(digest);
170         memset(ws, 0, sizeof(ws));
171
172         /* We have to take out the map fds for the digest calculation,
173          * since they are unstable from the user space side.
174          */
175         dst = (void *)raw;
176         for (i = 0, was_ld_map = false; i < fp->len; i++) {
177                 dst[i] = fp->insnsi[i];
178                 if (!was_ld_map &&
179                     dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
180                     dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
181                         was_ld_map = true;
182                         dst[i].imm = 0;
183                 } else if (was_ld_map &&
184                            dst[i].code == 0 &&
185                            dst[i].dst_reg == 0 &&
186                            dst[i].src_reg == 0 &&
187                            dst[i].off == 0) {
188                         was_ld_map = false;
189                         dst[i].imm = 0;
190                 } else {
191                         was_ld_map = false;
192                 }
193         }
194
195         psize = bpf_prog_insn_size(fp);
196         memset(&raw[psize], 0, raw_size - psize);
197         raw[psize++] = 0x80;
198
199         bsize  = round_up(psize, SHA_MESSAGE_BYTES);
200         blocks = bsize / SHA_MESSAGE_BYTES;
201         todo   = raw;
202         if (bsize - psize >= sizeof(__be64)) {
203                 bits = (__be64 *)(todo + bsize - sizeof(__be64));
204         } else {
205                 bits = (__be64 *)(todo + bsize + bits_offset);
206                 blocks++;
207         }
208         *bits = cpu_to_be64((psize - 1) << 3);
209
210         while (blocks--) {
211                 sha_transform(digest, todo, ws);
212                 todo += SHA_MESSAGE_BYTES;
213         }
214
215         result = (__force __be32 *)digest;
216         for (i = 0; i < SHA_DIGEST_WORDS; i++)
217                 result[i] = cpu_to_be32(digest[i]);
218         memcpy(fp->tag, result, sizeof(fp->tag));
219
220         vfree(raw);
221         return 0;
222 }
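/* Worked example (illustrative): for a two-insn program, psize starts at
 * 16 bytes; appending the 0x80 pad byte makes it 17, bsize rounds up to
 * one 64-byte SHA block, and since 64 - 17 >= 8 the big-endian bit count
 * (16 << 3 = 128) lands in the last 8 bytes of that same block.
 */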
223
224 static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
225 {
226         return BPF_CLASS(insn->code) == BPF_JMP  &&
227                /* Call and Exit are both special jumps with no
228                 * target inside the BPF instruction image.
229                 */
230                BPF_OP(insn->code) != BPF_CALL &&
231                BPF_OP(insn->code) != BPF_EXIT;
232 }
233
234 static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
235 {
236         struct bpf_insn *insn = prog->insnsi;
237         u32 i, insn_cnt = prog->len;
238
239         for (i = 0; i < insn_cnt; i++, insn++) {
240                 if (!bpf_is_jmp_and_has_target(insn))
241                         continue;
242
243                 /* Adjust offset of jmps if we cross boundaries. */
244                 if (i < pos && i + insn->off + 1 > pos)
245                         insn->off += delta;
246                 else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
247                         insn->off -= delta;
248         }
249 }
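/* Worked example (illustrative): take a jump at index 0 with off = +1,
 * i.e. targeting index 2. If one insn is inserted at pos = 1 (delta = 1),
 * then i < pos and i + off + 1 = 2 > pos, so off becomes +2 and the jump
 * still reaches its old target, which now lives at index 3.
 */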
250
251 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
252                                        const struct bpf_insn *patch, u32 len)
253 {
254         u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
255         struct bpf_prog *prog_adj;
256
257         /* Since our patchlet doesn't expand the image, we're done. */
258         if (insn_delta == 0) {
259                 memcpy(prog->insnsi + off, patch, sizeof(*patch));
260                 return prog;
261         }
262
263         insn_adj_cnt = prog->len + insn_delta;
264
265         /* Several new instructions need to be inserted. Make room
266          * for them. Likely, there's no need for a new allocation as
267          * the last page could have large enough tailroom.
268          */
269         prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
270                                     GFP_USER);
271         if (!prog_adj)
272                 return NULL;
273
274         prog_adj->len = insn_adj_cnt;
275
276         /* Patching happens in 3 steps:
277          *
278          * 1) Move over tail of insnsi from next instruction onwards,
279          *    so we can patch the single target insn with one or more
280          *    new ones (patching is always from 1 to n insns, n > 0).
281          * 2) Inject new instructions at the target location.
282          * 3) Adjust branch offsets if necessary.
283          */
284         insn_rest = insn_adj_cnt - off - len;
285
286         memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
287                 sizeof(*patch) * insn_rest);
288         memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
289
290         bpf_adj_branches(prog_adj, off, insn_delta);
291
292         return prog_adj;
293 }
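/* Usage sketch (hypothetical, mirroring how the verifier rewrites single
 * insns): replace the insn at 'off' with a two-insn sequence. On success
 * the returned program must be used instead of 'prog'; on failure NULL
 * is returned and 'prog' is left intact.
 */
static struct bpf_prog * __maybe_unused
bpf_example_patch(struct bpf_prog *prog, u32 off)
{
        const struct bpf_insn patch[] = {
                BPF_MOV64_REG(BPF_REG_AX, BPF_REG_0),
                BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, 1),
        };

        return bpf_patch_insn_single(prog, off, patch, ARRAY_SIZE(patch));
}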
294
295 #ifdef CONFIG_BPF_JIT
296 static __always_inline void
297 bpf_get_prog_addr_region(const struct bpf_prog *prog,
298                          unsigned long *symbol_start,
299                          unsigned long *symbol_end)
300 {
301         const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
302         unsigned long addr = (unsigned long)hdr;
303
304         WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
305
306         *symbol_start = addr;
307         *symbol_end   = addr + hdr->pages * PAGE_SIZE;
308 }
309
310 static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
311 {
312         BUILD_BUG_ON(sizeof("bpf_prog_") +
313                      sizeof(prog->tag) * 2 + 1 > KSYM_NAME_LEN);
314
315         sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
316         sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
317         *sym = 0;
318 }
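/* Example output (illustrative tag value): for a program whose 8-byte
 * tag starts 0x8d, 0x57, ..., the generated symbol reads
 * "bpf_prog_8d57<12 more hex digits>", which is what shows up in
 * kallsyms and stack traces.
 */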
319
320 static __always_inline unsigned long
321 bpf_get_prog_addr_start(struct latch_tree_node *n)
322 {
323         unsigned long symbol_start, symbol_end;
324         const struct bpf_prog_aux *aux;
325
326         aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
327         bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
328
329         return symbol_start;
330 }
331
332 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
333                                           struct latch_tree_node *b)
334 {
335         return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
336 }
337
338 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
339 {
340         unsigned long val = (unsigned long)key;
341         unsigned long symbol_start, symbol_end;
342         const struct bpf_prog_aux *aux;
343
344         aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
345         bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
346
347         if (val < symbol_start)
348                 return -1;
349         if (val >= symbol_end)
350                 return  1;
351
352         return 0;
353 }
354
355 static const struct latch_tree_ops bpf_tree_ops = {
356         .less   = bpf_tree_less,
357         .comp   = bpf_tree_comp,
358 };
359
360 static DEFINE_SPINLOCK(bpf_lock);
361 static LIST_HEAD(bpf_kallsyms);
362 static struct latch_tree_root bpf_tree __cacheline_aligned;
363
364 int bpf_jit_kallsyms __read_mostly;
365
366 static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
367 {
368         WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
369         list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
370         latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
371 }
372
373 static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
374 {
375         if (list_empty(&aux->ksym_lnode))
376                 return;
377
378         latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
379         list_del_rcu(&aux->ksym_lnode);
380 }
381
382 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
383 {
384         return fp->jited && !bpf_prog_was_classic(fp);
385 }
386
387 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
388 {
389         return list_empty(&fp->aux->ksym_lnode) ||
390                fp->aux->ksym_lnode.prev == LIST_POISON2;
391 }
392
393 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
394 {
395         if (!bpf_prog_kallsyms_candidate(fp) ||
396             !capable(CAP_SYS_ADMIN))
397                 return;
398
399         spin_lock_bh(&bpf_lock);
400         bpf_prog_ksym_node_add(fp->aux);
401         spin_unlock_bh(&bpf_lock);
402 }
403
404 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
405 {
406         if (!bpf_prog_kallsyms_candidate(fp))
407                 return;
408
409         spin_lock_bh(&bpf_lock);
410         bpf_prog_ksym_node_del(fp->aux);
411         spin_unlock_bh(&bpf_lock);
412 }
413
414 static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
415 {
416         struct latch_tree_node *n;
417
418         if (!bpf_jit_kallsyms_enabled())
419                 return NULL;
420
421         n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
422         return n ?
423                container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
424                NULL;
425 }
426
427 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
428                                  unsigned long *off, char *sym)
429 {
430         unsigned long symbol_start, symbol_end;
431         struct bpf_prog *prog;
432         char *ret = NULL;
433
434         rcu_read_lock();
435         prog = bpf_prog_kallsyms_find(addr);
436         if (prog) {
437                 bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
438                 bpf_get_prog_name(prog, sym);
439
440                 ret = sym;
441                 if (size)
442                         *size = symbol_end - symbol_start;
443                 if (off)
444                         *off  = addr - symbol_start;
445         }
446         rcu_read_unlock();
447
448         return ret;
449 }
450
451 bool is_bpf_text_address(unsigned long addr)
452 {
453         bool ret;
454
455         rcu_read_lock();
456         ret = bpf_prog_kallsyms_find(addr) != NULL;
457         rcu_read_unlock();
458
459         return ret;
460 }
461
462 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
463                     char *sym)
464 {
465         unsigned long symbol_start, symbol_end;
466         struct bpf_prog_aux *aux;
467         unsigned int it = 0;
468         int ret = -ERANGE;
469
470         if (!bpf_jit_kallsyms_enabled())
471                 return ret;
472
473         rcu_read_lock();
474         list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
475                 if (it++ != symnum)
476                         continue;
477
478                 bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
479                 bpf_get_prog_name(aux->prog, sym);
480
481                 *value = symbol_start;
482                 *type  = BPF_SYM_ELF_TYPE;
483
484                 ret = 0;
485                 break;
486         }
487         rcu_read_unlock();
488
489         return ret;
490 }
491
492 struct bpf_binary_header *
493 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
494                      unsigned int alignment,
495                      bpf_jit_fill_hole_t bpf_fill_ill_insns)
496 {
497         struct bpf_binary_header *hdr;
498         unsigned int size, hole, start;
499
500         /* Most BPF filters are really small, but if some of them
501          * fill a page, allow at least 128 extra bytes to insert a
502          * random section of illegal instructions.
503          */
504         size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
505         hdr = module_alloc(size);
506         if (hdr == NULL)
507                 return NULL;
508
509         /* Fill space with illegal/arch-dep instructions. */
510         bpf_fill_ill_insns(hdr, size);
511
512         hdr->pages = size / PAGE_SIZE;
513         hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
514                      PAGE_SIZE - sizeof(*hdr));
515         start = (get_random_int() % hole) & ~(alignment - 1);
516
517         /* Leave a random number of instructions before BPF code. */
518         *image_ptr = &hdr->image[start];
519
520         return hdr;
521 }
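/* Usage sketch (hypothetical JIT back end; the 0xcc filler is the x86
 * int3 opcode and purely illustrative): allocate an image and let the
 * caller emit code at the randomized start returned via *image.
 */
static void bpf_example_fill_ill_insns(void *area, unsigned int size)
{
        memset(area, 0xcc, size);       /* trap on stray execution */
}

static struct bpf_binary_header * __maybe_unused
bpf_example_image_alloc(unsigned int proglen, u8 **image)
{
        return bpf_jit_binary_alloc(proglen, image, 16,
                                    bpf_example_fill_ill_insns);
}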
522
523 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
524 {
525         module_memfree(hdr);
526 }
527
528 /* This symbol is only overridden by archs that have different
529  * requirements than the usual eBPF JITs, f.e. when they only
530  * implement cBPF JIT, do not set images read-only, etc.
531  */
532 void __weak bpf_jit_free(struct bpf_prog *fp)
533 {
534         if (fp->jited) {
535                 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
536
537                 bpf_jit_binary_unlock_ro(hdr);
538                 bpf_jit_binary_free(hdr);
539
540                 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
541         }
542
543         bpf_prog_unlock_free(fp);
544 }
545
546 int bpf_jit_harden __read_mostly;
547
548 static int bpf_jit_blind_insn(const struct bpf_insn *from,
549                               const struct bpf_insn *aux,
550                               struct bpf_insn *to_buff)
551 {
552         struct bpf_insn *to = to_buff;
553         u32 imm_rnd = get_random_int();
554         s16 off;
555
556         BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
557         BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
558
559         if (from->imm == 0 &&
560             (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
561              from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
562                 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
563                 goto out;
564         }
565
566         switch (from->code) {
567         case BPF_ALU | BPF_ADD | BPF_K:
568         case BPF_ALU | BPF_SUB | BPF_K:
569         case BPF_ALU | BPF_AND | BPF_K:
570         case BPF_ALU | BPF_OR  | BPF_K:
571         case BPF_ALU | BPF_XOR | BPF_K:
572         case BPF_ALU | BPF_MUL | BPF_K:
573         case BPF_ALU | BPF_MOV | BPF_K:
574         case BPF_ALU | BPF_DIV | BPF_K:
575         case BPF_ALU | BPF_MOD | BPF_K:
576                 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
577                 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
578                 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
579                 break;
580
581         case BPF_ALU64 | BPF_ADD | BPF_K:
582         case BPF_ALU64 | BPF_SUB | BPF_K:
583         case BPF_ALU64 | BPF_AND | BPF_K:
584         case BPF_ALU64 | BPF_OR  | BPF_K:
585         case BPF_ALU64 | BPF_XOR | BPF_K:
586         case BPF_ALU64 | BPF_MUL | BPF_K:
587         case BPF_ALU64 | BPF_MOV | BPF_K:
588         case BPF_ALU64 | BPF_DIV | BPF_K:
589         case BPF_ALU64 | BPF_MOD | BPF_K:
590                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
591                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
592                 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
593                 break;
594
595         case BPF_JMP | BPF_JEQ  | BPF_K:
596         case BPF_JMP | BPF_JNE  | BPF_K:
597         case BPF_JMP | BPF_JGT  | BPF_K:
598         case BPF_JMP | BPF_JLT  | BPF_K:
599         case BPF_JMP | BPF_JGE  | BPF_K:
600         case BPF_JMP | BPF_JLE  | BPF_K:
601         case BPF_JMP | BPF_JSGT | BPF_K:
602         case BPF_JMP | BPF_JSLT | BPF_K:
603         case BPF_JMP | BPF_JSGE | BPF_K:
604         case BPF_JMP | BPF_JSLE | BPF_K:
605         case BPF_JMP | BPF_JSET | BPF_K:
606                 /* Account for the extra offset in case of a backjump. */
607                 off = from->off;
608                 if (off < 0)
609                         off -= 2;
610                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
611                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
612                 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
613                 break;
614
615         case BPF_LD | BPF_ABS | BPF_W:
616         case BPF_LD | BPF_ABS | BPF_H:
617         case BPF_LD | BPF_ABS | BPF_B:
618                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
619                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
620                 *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
621                 break;
622
623         case BPF_LD | BPF_IND | BPF_W:
624         case BPF_LD | BPF_IND | BPF_H:
625         case BPF_LD | BPF_IND | BPF_B:
626                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
627                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
628                 *to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
629                 *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
630                 break;
631
632         case BPF_LD | BPF_IMM | BPF_DW:
633                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
634                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
635                 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
636                 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
637                 break;
638         case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
639                 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
640                 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
641                 *to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
642                 break;
643
644         case BPF_ST | BPF_MEM | BPF_DW:
645         case BPF_ST | BPF_MEM | BPF_W:
646         case BPF_ST | BPF_MEM | BPF_H:
647         case BPF_ST | BPF_MEM | BPF_B:
648                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
649                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
650                 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
651                 break;
652         }
653 out:
654         return to - to_buff;
655 }
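/* Worked example (illustrative): with blinding enabled, the single insn
 *
 *      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x42)
 *
 * comes out of the rewrite above as
 *
 *      BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 0x42)
 *      BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd)
 *      BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * so the attacker-chosen constant 0x42 never appears verbatim in the
 * JIT image.
 */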
656
657 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
658                                               gfp_t gfp_extra_flags)
659 {
660         gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
661         struct bpf_prog *fp;
662
663         fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
664         if (fp != NULL) {
665                 kmemcheck_annotate_bitfield(fp, meta);
666
667                 /* aux->prog still points to the fp_other one, so
668                  * when promoting the clone to the real program,
669                  * this still needs to be adapted.
670                  */
671                 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
672         }
673
674         return fp;
675 }
676
677 static void bpf_prog_clone_free(struct bpf_prog *fp)
678 {
679         /* aux was stolen by the other clone, so we cannot free
680          * it from this path! It will be freed eventually by the
681          * other program on release.
682          *
683          * At this point, we don't need a deferred release since
684          * clone is guaranteed to not be locked.
685          */
686         fp->aux = NULL;
687         __bpf_prog_free(fp);
688 }
689
690 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
691 {
692         /* We have to repoint aux->prog to self, as we don't
693          * know whether fp here is the clone or the original.
694          */
695         fp->aux->prog = fp;
696         bpf_prog_clone_free(fp_other);
697 }
698
699 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
700 {
701         struct bpf_insn insn_buff[16], aux[2];
702         struct bpf_prog *clone, *tmp;
703         int insn_delta, insn_cnt;
704         struct bpf_insn *insn;
705         int i, rewritten;
706
707         if (!bpf_jit_blinding_enabled())
708                 return prog;
709
710         clone = bpf_prog_clone_create(prog, GFP_USER);
711         if (!clone)
712                 return ERR_PTR(-ENOMEM);
713
714         insn_cnt = clone->len;
715         insn = clone->insnsi;
716
717         for (i = 0; i < insn_cnt; i++, insn++) {
718                 /* We temporarily need to hold the original ld64 insn
719                  * so that we can still access the first part in the
720                  * second blinding run.
721                  */
722                 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
723                     insn[1].code == 0)
724                         memcpy(aux, insn, sizeof(aux));
725
726                 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
727                 if (!rewritten)
728                         continue;
729
730                 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
731                 if (!tmp) {
732                         /* Patching may have repointed aux->prog during
733                          * realloc from the original one, so we need to
734                          * fix it up here on error.
735                          */
736                         bpf_jit_prog_release_other(prog, clone);
737                         return ERR_PTR(-ENOMEM);
738                 }
739
740                 clone = tmp;
741                 insn_delta = rewritten - 1;
742
743                 /* Walk new program and skip insns we just inserted. */
744                 insn = clone->insnsi + i + insn_delta;
745                 insn_cnt += insn_delta;
746                 i        += insn_delta;
747         }
748
749         return clone;
750 }
751 #endif /* CONFIG_BPF_JIT */
752
753 /* Base function for offset calculation. Needs to go into .text section,
754  * therefore keeping it non-static as well; will also be used by JITs
755  * anyway later on, so do not let the compiler omit it.
756  */
757 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
758 {
759         return 0;
760 }
761 EXPORT_SYMBOL_GPL(__bpf_call_base);
762
763 /**
764  *      ___bpf_prog_run - run eBPF program on a given context
765  *      @regs: is the array of MAX_BPF_REG eBPF program registers
766  *      @insn: is the array of eBPF instructions
767  *
768  * Decode and execute eBPF instructions.
769  */
770 static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
771                                     u64 *stack)
772 {
773         u64 tmp;
774         static const void *jumptable[256] = {
775                 [0 ... 255] = &&default_label,
776                 /* Now overwrite non-defaults ... */
777                 /* 32 bit ALU operations */
778                 [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
779                 [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
780                 [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
781                 [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
782                 [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
783                 [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
784                 [BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
785                 [BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
786                 [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
787                 [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
788                 [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
789                 [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
790                 [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
791                 [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
792                 [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
793                 [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
794                 [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
795                 [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
796                 [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
797                 [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
798                 [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
799                 [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
800                 [BPF_ALU | BPF_NEG] = &&ALU_NEG,
801                 [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
802                 [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
803                 /* 64 bit ALU operations */
804                 [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
805                 [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
806                 [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
807                 [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
808                 [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
809                 [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
810                 [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
811                 [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
812                 [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
813                 [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
814                 [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
815                 [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
816                 [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
817                 [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
818                 [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
819                 [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
820                 [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
821                 [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
822                 [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
823                 [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
824                 [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
825                 [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
826                 [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
827                 [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
828                 [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
829                 /* Call instruction */
830                 [BPF_JMP | BPF_CALL] = &&JMP_CALL,
831                 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
832                 /* Jumps */
833                 [BPF_JMP | BPF_JA] = &&JMP_JA,
834                 [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
835                 [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
836                 [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
837                 [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
838                 [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
839                 [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
840                 [BPF_JMP | BPF_JLT | BPF_X] = &&JMP_JLT_X,
841                 [BPF_JMP | BPF_JLT | BPF_K] = &&JMP_JLT_K,
842                 [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
843                 [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
844                 [BPF_JMP | BPF_JLE | BPF_X] = &&JMP_JLE_X,
845                 [BPF_JMP | BPF_JLE | BPF_K] = &&JMP_JLE_K,
846                 [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
847                 [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
848                 [BPF_JMP | BPF_JSLT | BPF_X] = &&JMP_JSLT_X,
849                 [BPF_JMP | BPF_JSLT | BPF_K] = &&JMP_JSLT_K,
850                 [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
851                 [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
852                 [BPF_JMP | BPF_JSLE | BPF_X] = &&JMP_JSLE_X,
853                 [BPF_JMP | BPF_JSLE | BPF_K] = &&JMP_JSLE_K,
854                 [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
855                 [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
856                 /* Program return */
857                 [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
858                 /* Store instructions */
859                 [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
860                 [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
861                 [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
862                 [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
863                 [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
864                 [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
865                 [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
866                 [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
867                 [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
868                 [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
869                 /* Load instructions */
870                 [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
871                 [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
872                 [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
873                 [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
874                 [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
875                 [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
876                 [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
877                 [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
878                 [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
879                 [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
880                 [BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW,
881         };
882         u32 tail_call_cnt = 0;
883         void *ptr;
884         int off;
885
886 #define CONT     ({ insn++; goto select_insn; })
887 #define CONT_JMP ({ insn++; goto select_insn; })
888
889 select_insn:
890         goto *jumptable[insn->code];
891
892         /* ALU */
893 #define ALU(OPCODE, OP)                 \
894         ALU64_##OPCODE##_X:             \
895                 DST = DST OP SRC;       \
896                 CONT;                   \
897         ALU_##OPCODE##_X:               \
898                 DST = (u32) DST OP (u32) SRC;   \
899                 CONT;                   \
900         ALU64_##OPCODE##_K:             \
901                 DST = DST OP IMM;               \
902                 CONT;                   \
903         ALU_##OPCODE##_K:               \
904                 DST = (u32) DST OP (u32) IMM;   \
905                 CONT;
906
907         ALU(ADD,  +)
908         ALU(SUB,  -)
909         ALU(AND,  &)
910         ALU(OR,   |)
911         ALU(LSH, <<)
912         ALU(RSH, >>)
913         ALU(XOR,  ^)
914         ALU(MUL,  *)
915 #undef ALU
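        /* Expansion sketch (for reference): ALU(ADD, +) above emits four
         * labels, e.g.
         *
         *      ALU64_ADD_X: DST = DST + SRC; CONT;
         *      ALU_ADD_X:   DST = (u32) DST + (u32) SRC; CONT;
         *
         * and likewise the _K variants with IMM as the second operand.
         */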
916         ALU_NEG:
917                 DST = (u32) -DST;
918                 CONT;
919         ALU64_NEG:
920                 DST = -DST;
921                 CONT;
922         ALU_MOV_X:
923                 DST = (u32) SRC;
924                 CONT;
925         ALU_MOV_K:
926                 DST = (u32) IMM;
927                 CONT;
928         ALU64_MOV_X:
929                 DST = SRC;
930                 CONT;
931         ALU64_MOV_K:
932                 DST = IMM;
933                 CONT;
934         LD_IMM_DW:
935                 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
936                 insn++;
937                 CONT;
938         ALU64_ARSH_X:
939                 (*(s64 *) &DST) >>= SRC;
940                 CONT;
941         ALU64_ARSH_K:
942                 (*(s64 *) &DST) >>= IMM;
943                 CONT;
944         ALU64_MOD_X:
945                 if (unlikely(SRC == 0))
946                         return 0;
947                 div64_u64_rem(DST, SRC, &tmp);
948                 DST = tmp;
949                 CONT;
950         ALU_MOD_X:
951                 if (unlikely(SRC == 0))
952                         return 0;
953                 tmp = (u32) DST;
954                 DST = do_div(tmp, (u32) SRC);
955                 CONT;
956         ALU64_MOD_K:
957                 div64_u64_rem(DST, IMM, &tmp);
958                 DST = tmp;
959                 CONT;
960         ALU_MOD_K:
961                 tmp = (u32) DST;
962                 DST = do_div(tmp, (u32) IMM);
963                 CONT;
964         ALU64_DIV_X:
965                 if (unlikely(SRC == 0))
966                         return 0;
967                 DST = div64_u64(DST, SRC);
968                 CONT;
969         ALU_DIV_X:
970                 if (unlikely(SRC == 0))
971                         return 0;
972                 tmp = (u32) DST;
973                 do_div(tmp, (u32) SRC);
974                 DST = (u32) tmp;
975                 CONT;
976         ALU64_DIV_K:
977                 DST = div64_u64(DST, IMM);
978                 CONT;
979         ALU_DIV_K:
980                 tmp = (u32) DST;
981                 do_div(tmp, (u32) IMM);
982                 DST = (u32) tmp;
983                 CONT;
984         ALU_END_TO_BE:
985                 switch (IMM) {
986                 case 16:
987                         DST = (__force u16) cpu_to_be16(DST);
988                         break;
989                 case 32:
990                         DST = (__force u32) cpu_to_be32(DST);
991                         break;
992                 case 64:
993                         DST = (__force u64) cpu_to_be64(DST);
994                         break;
995                 }
996                 CONT;
997         ALU_END_TO_LE:
998                 switch (IMM) {
999                 case 16:
1000                         DST = (__force u16) cpu_to_le16(DST);
1001                         break;
1002                 case 32:
1003                         DST = (__force u32) cpu_to_le32(DST);
1004                         break;
1005                 case 64:
1006                         DST = (__force u64) cpu_to_le64(DST);
1007                         break;
1008                 }
1009                 CONT;
1010
1011         /* CALL */
1012         JMP_CALL:
1013                 /* Function call scratches BPF_R1-BPF_R5 registers,
1014                  * preserves BPF_R6-BPF_R9, and stores return value
1015                  * into BPF_R0.
1016                  */
1017                 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1018                                                        BPF_R4, BPF_R5);
1019                 CONT;
1020
1021         JMP_TAIL_CALL: {
1022                 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1023                 struct bpf_array *array = container_of(map, struct bpf_array, map);
1024                 struct bpf_prog *prog;
1025                 u32 index = BPF_R3;
1026
1027                 if (unlikely(index >= array->map.max_entries))
1028                         goto out;
1029                 if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
1030                         goto out;
1031
1032                 tail_call_cnt++;
1033
1034                 prog = READ_ONCE(array->ptrs[index]);
1035                 if (!prog)
1036                         goto out;
1037
1038                 /* ARG1 at this point is guaranteed to point to CTX from
1039                  * the verifier side due to the fact that the tail call is
1040                  * handled like a helper, that is, bpf_tail_call_proto,
1041                  * where arg1_type is ARG_PTR_TO_CTX.
1042                  */
1043                 insn = prog->insnsi;
1044                 goto select_insn;
1045 out:
1046                 CONT;
1047         }
1048         /* JMP */
1049         JMP_JA:
1050                 insn += insn->off;
1051                 CONT;
1052         JMP_JEQ_X:
1053                 if (DST == SRC) {
1054                         insn += insn->off;
1055                         CONT_JMP;
1056                 }
1057                 CONT;
1058         JMP_JEQ_K:
1059                 if (DST == IMM) {
1060                         insn += insn->off;
1061                         CONT_JMP;
1062                 }
1063                 CONT;
1064         JMP_JNE_X:
1065                 if (DST != SRC) {
1066                         insn += insn->off;
1067                         CONT_JMP;
1068                 }
1069                 CONT;
1070         JMP_JNE_K:
1071                 if (DST != IMM) {
1072                         insn += insn->off;
1073                         CONT_JMP;
1074                 }
1075                 CONT;
1076         JMP_JGT_X:
1077                 if (DST > SRC) {
1078                         insn += insn->off;
1079                         CONT_JMP;
1080                 }
1081                 CONT;
1082         JMP_JGT_K:
1083                 if (DST > IMM) {
1084                         insn += insn->off;
1085                         CONT_JMP;
1086                 }
1087                 CONT;
1088         JMP_JLT_X:
1089                 if (DST < SRC) {
1090                         insn += insn->off;
1091                         CONT_JMP;
1092                 }
1093                 CONT;
1094         JMP_JLT_K:
1095                 if (DST < IMM) {
1096                         insn += insn->off;
1097                         CONT_JMP;
1098                 }
1099                 CONT;
1100         JMP_JGE_X:
1101                 if (DST >= SRC) {
1102                         insn += insn->off;
1103                         CONT_JMP;
1104                 }
1105                 CONT;
1106         JMP_JGE_K:
1107                 if (DST >= IMM) {
1108                         insn += insn->off;
1109                         CONT_JMP;
1110                 }
1111                 CONT;
1112         JMP_JLE_X:
1113                 if (DST <= SRC) {
1114                         insn += insn->off;
1115                         CONT_JMP;
1116                 }
1117                 CONT;
1118         JMP_JLE_K:
1119                 if (DST <= IMM) {
1120                         insn += insn->off;
1121                         CONT_JMP;
1122                 }
1123                 CONT;
1124         JMP_JSGT_X:
1125                 if (((s64) DST) > ((s64) SRC)) {
1126                         insn += insn->off;
1127                         CONT_JMP;
1128                 }
1129                 CONT;
1130         JMP_JSGT_K:
1131                 if (((s64) DST) > ((s64) IMM)) {
1132                         insn += insn->off;
1133                         CONT_JMP;
1134                 }
1135                 CONT;
1136         JMP_JSLT_X:
1137                 if (((s64) DST) < ((s64) SRC)) {
1138                         insn += insn->off;
1139                         CONT_JMP;
1140                 }
1141                 CONT;
1142         JMP_JSLT_K:
1143                 if (((s64) DST) < ((s64) IMM)) {
1144                         insn += insn->off;
1145                         CONT_JMP;
1146                 }
1147                 CONT;
1148         JMP_JSGE_X:
1149                 if (((s64) DST) >= ((s64) SRC)) {
1150                         insn += insn->off;
1151                         CONT_JMP;
1152                 }
1153                 CONT;
1154         JMP_JSGE_K:
1155                 if (((s64) DST) >= ((s64) IMM)) {
1156                         insn += insn->off;
1157                         CONT_JMP;
1158                 }
1159                 CONT;
1160         JMP_JSLE_X:
1161                 if (((s64) DST) <= ((s64) SRC)) {
1162                         insn += insn->off;
1163                         CONT_JMP;
1164                 }
1165                 CONT;
1166         JMP_JSLE_K:
1167                 if (((s64) DST) <= ((s64) IMM)) {
1168                         insn += insn->off;
1169                         CONT_JMP;
1170                 }
1171                 CONT;
1172         JMP_JSET_X:
1173                 if (DST & SRC) {
1174                         insn += insn->off;
1175                         CONT_JMP;
1176                 }
1177                 CONT;
1178         JMP_JSET_K:
1179                 if (DST & IMM) {
1180                         insn += insn->off;
1181                         CONT_JMP;
1182                 }
1183                 CONT;
1184         JMP_EXIT:
1185                 return BPF_R0;
1186
1187         /* STX, ST and LDX */
1188 #define LDST(SIZEOP, SIZE)                                              \
1189         STX_MEM_##SIZEOP:                                               \
1190                 *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
1191                 CONT;                                                   \
1192         ST_MEM_##SIZEOP:                                                \
1193                 *(SIZE *)(unsigned long) (DST + insn->off) = IMM;       \
1194                 CONT;                                                   \
1195         LDX_MEM_##SIZEOP:                                               \
1196                 DST = *(SIZE *)(unsigned long) (SRC + insn->off);       \
1197                 CONT;
1198
1199         LDST(B,   u8)
1200         LDST(H,  u16)
1201         LDST(W,  u32)
1202         LDST(DW, u64)
1203 #undef LDST
1204         STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
1205                 atomic_add((u32) SRC, (atomic_t *)(unsigned long)
1206                            (DST + insn->off));
1207                 CONT;
1208         STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
1209                 atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
1210                              (DST + insn->off));
1211                 CONT;
1212         LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
1213                 off = IMM;
1214 load_word:
1215                 /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are only
1216                  * appearing in the programs where ctx == skb
1217                  * (see may_access_skb() in the verifier). All programs
1218                  * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
1219                  * bpf_convert_filter() saves it in BPF_R6, internal BPF
1220                  * verifier will check that BPF_R6 == ctx.
1221                  *
1222                  * BPF_ABS and BPF_IND are wrappers of function calls,
1223                  * so they scratch BPF_R1-BPF_R5 registers, preserve
1224                  * BPF_R6-BPF_R9, and store return value into BPF_R0.
1225                  *
1226                  * Implicit input:
1227                  *   ctx == skb == BPF_R6 == CTX
1228                  *
1229                  * Explicit input:
1230                  *   SRC == any register
1231                  *   IMM == 32-bit immediate
1232                  *
1233                  * Output:
1234                  *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
1235                  */
1236
1237                 ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
1238                 if (likely(ptr != NULL)) {
1239                         BPF_R0 = get_unaligned_be32(ptr);
1240                         CONT;
1241                 }
1242
1243                 return 0;
1244         LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
1245                 off = IMM;
1246 load_half:
1247                 ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
1248                 if (likely(ptr != NULL)) {
1249                         BPF_R0 = get_unaligned_be16(ptr);
1250                         CONT;
1251                 }
1252
1253                 return 0;
1254         LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
1255                 off = IMM;
1256 load_byte:
1257                 ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
1258                 if (likely(ptr != NULL)) {
1259                         BPF_R0 = *(u8 *)ptr;
1260                         CONT;
1261                 }
1262
1263                 return 0;
1264         LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
1265                 off = IMM + SRC;
1266                 goto load_word;
1267         LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
1268                 off = IMM + SRC;
1269                 goto load_half;
1270         LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
1271                 off = IMM + SRC;
1272                 goto load_byte;
1273
1274         default_label:
1275                 /* If we ever reach this, we have a bug somewhere. */
1276                 WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
1277                 return 0;
1278 }
1279 STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
1280
1281 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
1282 #define DEFINE_BPF_PROG_RUN(stack_size) \
1283 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
1284 { \
1285         u64 stack[stack_size / sizeof(u64)]; \
1286         u64 regs[MAX_BPF_REG]; \
1287 \
1288         FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
1289         ARG1 = (u64) (unsigned long) ctx; \
1290         return ___bpf_prog_run(regs, insn, stack); \
1291 }
1292
1293 #define EVAL1(FN, X) FN(X)
1294 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
1295 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
1296 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
1297 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
1298 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
1299
1300 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
1301 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
1302 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
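/* The three EVAL chains above expand to 16 interpreter variants,
 * __bpf_prog_run32() through __bpf_prog_run512(), each reserving the
 * given number of bytes of on-stack BPF scratch space.
 */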
1303
1304 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
1305
1306 static unsigned int (*interpreters[])(const void *ctx,
1307                                       const struct bpf_insn *insn) = {
1308 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
1309 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
1310 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
1311 };
1312
1313 bool bpf_prog_array_compatible(struct bpf_array *array,
1314                                const struct bpf_prog *fp)
1315 {
1316         if (!array->owner_prog_type) {
1317                 /* There's no owner yet where we could check for
1318                  * compatibility.
1319                  */
1320                 array->owner_prog_type = fp->type;
1321                 array->owner_jited = fp->jited;
1322
1323                 return true;
1324         }
1325
1326         return array->owner_prog_type == fp->type &&
1327                array->owner_jited == fp->jited;
1328 }
1329
1330 static int bpf_check_tail_call(const struct bpf_prog *fp)
1331 {
1332         struct bpf_prog_aux *aux = fp->aux;
1333         int i;
1334
1335         for (i = 0; i < aux->used_map_cnt; i++) {
1336                 struct bpf_map *map = aux->used_maps[i];
1337                 struct bpf_array *array;
1338
1339                 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
1340                         continue;
1341
1342                 array = container_of(map, struct bpf_array, map);
1343                 if (!bpf_prog_array_compatible(array, fp))
1344                         return -EINVAL;
1345         }
1346
1347         return 0;
1348 }
1349
1350 /**
1351  *      bpf_prog_select_runtime - select exec runtime for BPF program
1352  *      @fp: bpf_prog populated with internal BPF program
1353  *      @err: pointer to error variable
1354  *
1355  * Try to JIT eBPF program, if JIT is not available, use interpreter.
1356  * The BPF program will be executed via BPF_PROG_RUN() macro.
1357  */
1358 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1359 {
1360         u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
1361
1362         fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
1363
1364         /* eBPF JITs can rewrite the program in case constant
1365          * blinding is active. However, in case of error during
1366          * blinding, bpf_int_jit_compile() must always return a
1367          * valid program, which in this case would simply not
1368          * be JITed, but fall back to the interpreter.
1369          */
1370         fp = bpf_int_jit_compile(fp);
1371         bpf_prog_lock_ro(fp);
1372
1373         /* The tail call compatibility check can only be done at
1374          * this late stage, as we need to determine whether we deal
1375          * with JITed or non-JITed program concatenations, and not
1376          * all eBPF JITs might immediately support all features.
1377          */
1378         *err = bpf_check_tail_call(fp);
1379
1380         return fp;
1381 }
1382 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
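/* Example (illustrative): a program with aux->stack_depth == 72 rounds
 * up to 96 and selects interpreters[96 / 32 - 1], i.e.
 * __bpf_prog_run96().
 */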
1383
1384 static void bpf_prog_free_deferred(struct work_struct *work)
1385 {
1386         struct bpf_prog_aux *aux;
1387
1388         aux = container_of(work, struct bpf_prog_aux, work);
1389         bpf_jit_free(aux->prog);
1390 }
1391
1392 /* Free internal BPF program */
1393 void bpf_prog_free(struct bpf_prog *fp)
1394 {
1395         struct bpf_prog_aux *aux = fp->aux;
1396
1397         INIT_WORK(&aux->work, bpf_prog_free_deferred);
1398         schedule_work(&aux->work);
1399 }
1400 EXPORT_SYMBOL_GPL(bpf_prog_free);
1401
1402 /* RNG for unprivileged user space with separated state from prandom_u32(). */
1403 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
1404
1405 void bpf_user_rnd_init_once(void)
1406 {
1407         prandom_init_once(&bpf_user_rnd_state);
1408 }
1409
1410 BPF_CALL_0(bpf_user_rnd_u32)
1411 {
1412         /* Should someone ever have the rather unwise idea to use some
1413          * of the registers passed into this function, then note that
1414          * this function is called from native eBPF and classic-to-eBPF
1415          * transformations. Register assignments from both sides are
1416          * different, f.e. classic always sets fn(ctx, A, X) here.
1417          */
1418         struct rnd_state *state;
1419         u32 res;
1420
1421         state = &get_cpu_var(bpf_user_rnd_state);
1422         res = prandom_u32_state(state);
1423         put_cpu_var(bpf_user_rnd_state);
1424
1425         return res;
1426 }
1427
1428 /* Weak definitions of helper functions in case we don't have bpf syscall. */
1429 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
1430 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
1431 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
1432
1433 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
1434 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
1435 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
1436 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
1437
1438 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
1439 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
1440 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
1441 const struct bpf_func_proto bpf_sock_map_update_proto __weak;
1442
1443 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
1444 {
1445         return NULL;
1446 }
1447
1448 u64 __weak
1449 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
1450                  void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
1451 {
1452         return -ENOTSUPP;
1453 }
1454
1455 /* Always built-in helper functions. */
1456 const struct bpf_func_proto bpf_tail_call_proto = {
1457         .func           = NULL,
1458         .gpl_only       = false,
1459         .ret_type       = RET_VOID,
1460         .arg1_type      = ARG_PTR_TO_CTX,
1461         .arg2_type      = ARG_CONST_MAP_PTR,
1462         .arg3_type      = ARG_ANYTHING,
1463 };
1464
1465 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
1466  * It is encouraged to implement bpf_int_jit_compile() instead, so that
1467  * eBPF and implicitly also cBPF can get JITed!
1468  */
1469 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
1470 {
1471         return prog;
1472 }
1473
1474 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
1475  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
1476  */
1477 void __weak bpf_jit_compile(struct bpf_prog *prog)
1478 {
1479 }
1480
1481 bool __weak bpf_helper_changes_pkt_data(void *func)
1482 {
1483         return false;
1484 }
1485
1486 /* To execute LD_ABS/LD_IND instructions, __bpf_prog_run() may call
1487  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
1488  */
1489 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
1490                          int len)
1491 {
1492         return -EFAULT;
1493 }
1494
1495 /* All definitions of tracepoints related to BPF. */
1496 #define CREATE_TRACE_POINTS
1497 #include <linux/bpf_trace.h>
1498
1499 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
1500
1501 EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
1502 EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);