1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Linux Socket Filter - Kernel level socket filtering
4  *
5  * Based on the design of the Berkeley Packet Filter. The new
6  * internal format has been designed by PLUMgrid:
7  *
8  *      Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
9  *
10  * Authors:
11  *
12  *      Jay Schulist <[email protected]>
13  *      Alexei Starovoitov <[email protected]>
14  *      Daniel Borkmann <[email protected]>
15  *
16  * Andi Kleen - Fix a few bad bugs and races.
17  * Kris Katterjohn - Added many additional checks in bpf_check_classic()
18  */
19
20 #include <uapi/linux/btf.h>
21 #include <linux/filter.h>
22 #include <linux/skbuff.h>
23 #include <linux/vmalloc.h>
24 #include <linux/random.h>
25 #include <linux/moduleloader.h>
26 #include <linux/bpf.h>
27 #include <linux/btf.h>
28 #include <linux/objtool.h>
29 #include <linux/rbtree_latch.h>
30 #include <linux/kallsyms.h>
31 #include <linux/rcupdate.h>
32 #include <linux/perf_event.h>
33 #include <linux/extable.h>
34 #include <linux/log2.h>
35 #include <linux/bpf_verifier.h>
36 #include <linux/nodemask.h>
37
38 #include <asm/barrier.h>
39 #include <asm/unaligned.h>
40
41 /* Registers */
42 #define BPF_R0  regs[BPF_REG_0]
43 #define BPF_R1  regs[BPF_REG_1]
44 #define BPF_R2  regs[BPF_REG_2]
45 #define BPF_R3  regs[BPF_REG_3]
46 #define BPF_R4  regs[BPF_REG_4]
47 #define BPF_R5  regs[BPF_REG_5]
48 #define BPF_R6  regs[BPF_REG_6]
49 #define BPF_R7  regs[BPF_REG_7]
50 #define BPF_R8  regs[BPF_REG_8]
51 #define BPF_R9  regs[BPF_REG_9]
52 #define BPF_R10 regs[BPF_REG_10]
53
54 /* Named registers */
55 #define DST     regs[insn->dst_reg]
56 #define SRC     regs[insn->src_reg]
57 #define FP      regs[BPF_REG_FP]
58 #define AX      regs[BPF_REG_AX]
59 #define ARG1    regs[BPF_REG_ARG1]
60 #define CTX     regs[BPF_REG_CTX]
61 #define IMM     insn->imm
62
63 /* No hurry in this branch
64  *
65  * Exported for the bpf jit load helper.
66  */
67 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
68 {
69         u8 *ptr = NULL;
70
71         if (k >= SKF_NET_OFF)
72                 ptr = skb_network_header(skb) + k - SKF_NET_OFF;
73         else if (k >= SKF_LL_OFF)
74                 ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
75
76         if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
77                 return ptr;
78
79         return NULL;
80 }
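/* Editor's illustration (not part of the original source): classic BPF can use
 * the special negative offsets handled above to read relative to a header
 * rather than to skb->data. For example, a load with k == SKF_NET_OFF + 9
 * resolves to byte 9 of the network header (the IPv4 protocol field),
 * provided the network header is set and the byte lies within the linear
 * data; otherwise the helper returns NULL and the load fails.
 */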
81
82 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
83 {
84         gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
85         struct bpf_prog_aux *aux;
86         struct bpf_prog *fp;
87
88         size = round_up(size, PAGE_SIZE);
89         fp = __vmalloc(size, gfp_flags);
90         if (fp == NULL)
91                 return NULL;
92
93         aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT | gfp_extra_flags);
94         if (aux == NULL) {
95                 vfree(fp);
96                 return NULL;
97         }
98         fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
99         if (!fp->active) {
100                 vfree(fp);
101                 kfree(aux);
102                 return NULL;
103         }
104
105         fp->pages = size / PAGE_SIZE;
106         fp->aux = aux;
107         fp->aux->prog = fp;
108         fp->jit_requested = ebpf_jit_enabled();
109         fp->blinding_requested = bpf_jit_blinding_enabled(fp);
110
111         INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
112         mutex_init(&fp->aux->used_maps_mutex);
113         mutex_init(&fp->aux->dst_mutex);
114
115         return fp;
116 }
117
118 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
119 {
120         gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
121         struct bpf_prog *prog;
122         int cpu;
123
124         prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
125         if (!prog)
126                 return NULL;
127
128         prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
129         if (!prog->stats) {
130                 free_percpu(prog->active);
131                 kfree(prog->aux);
132                 vfree(prog);
133                 return NULL;
134         }
135
136         for_each_possible_cpu(cpu) {
137                 struct bpf_prog_stats *pstats;
138
139                 pstats = per_cpu_ptr(prog->stats, cpu);
140                 u64_stats_init(&pstats->syncp);
141         }
142         return prog;
143 }
144 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
145
146 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
147 {
148         if (!prog->aux->nr_linfo || !prog->jit_requested)
149                 return 0;
150
151         prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
152                                           sizeof(*prog->aux->jited_linfo),
153                                           GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
154         if (!prog->aux->jited_linfo)
155                 return -ENOMEM;
156
157         return 0;
158 }
159
160 void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
161 {
162         if (prog->aux->jited_linfo &&
163             (!prog->jited || !prog->aux->jited_linfo[0])) {
164                 kvfree(prog->aux->jited_linfo);
165                 prog->aux->jited_linfo = NULL;
166         }
167
168         kfree(prog->aux->kfunc_tab);
169         prog->aux->kfunc_tab = NULL;
170 }
171
172 /* The jit engine is responsible for providing an array
173  * for the insn_off to jited_off mapping (insn_to_jit_off).
174  *
175  * The idx to this array is the insn_off.  Hence, the insn_off
176  * here is relative to the prog itself instead of the main prog.
177  * This array has one entry for each xlated bpf insn.
178  *
179  * jited_off is the byte off to the last byte of the jited insn.
180  *
181  * Hence, with
182  * insn_start:
183  *      The first bpf insn off of the prog.  The insn off
184  *      here is relative to the main prog.
185  *      e.g. if prog is a subprog, insn_start > 0
186  * linfo_idx:
187  *      The prog's idx to prog->aux->linfo and jited_linfo
188  *
189  * jited_linfo[linfo_idx] = prog->bpf_func
190  *
191  * For i > linfo_idx,
192  *
193  * jited_linfo[i] = prog->bpf_func +
194  *      insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
195  */
196 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
197                                const u32 *insn_to_jit_off)
198 {
199         u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
200         const struct bpf_line_info *linfo;
201         void **jited_linfo;
202
203         if (!prog->aux->jited_linfo)
204                 /* Userspace did not provide linfo */
205                 return;
206
207         linfo_idx = prog->aux->linfo_idx;
208         linfo = &prog->aux->linfo[linfo_idx];
209         insn_start = linfo[0].insn_off;
210         insn_end = insn_start + prog->len;
211
212         jited_linfo = &prog->aux->jited_linfo[linfo_idx];
213         jited_linfo[0] = prog->bpf_func;
214
215         nr_linfo = prog->aux->nr_linfo - linfo_idx;
216
217         for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
218                 /* The verifier ensures that linfo[i].insn_off is
219                  * strictly increasing
220                  */
221                 jited_linfo[i] = prog->bpf_func +
222                         insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
223 }
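/* Worked example (editor's sketch with made-up numbers): suppose a subprog has
 * linfo_idx = 3, its first line info entry marks the subprog entry so that
 * insn_start = linfo[0].insn_off = 10 (relative to the main prog), and
 * linfo[1].insn_off = 12. Then:
 *
 *	jited_linfo[0] = prog->bpf_func
 *	jited_linfo[1] = prog->bpf_func + insn_to_jit_off[12 - 10 - 1]
 *	               = prog->bpf_func + insn_to_jit_off[1]
 *
 * i.e. the jited address just past xlated insn 1 of this subprog, which is
 * where the code for the insn at insn_off 12 begins.
 */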
224
225 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
226                                   gfp_t gfp_extra_flags)
227 {
228         gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_ZERO | gfp_extra_flags;
229         struct bpf_prog *fp;
230         u32 pages;
231
232         size = round_up(size, PAGE_SIZE);
233         pages = size / PAGE_SIZE;
234         if (pages <= fp_old->pages)
235                 return fp_old;
236
237         fp = __vmalloc(size, gfp_flags);
238         if (fp) {
239                 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
240                 fp->pages = pages;
241                 fp->aux->prog = fp;
242
243                 /* We keep fp->aux from fp_old around in the new
244                  * reallocated structure.
245                  */
246                 fp_old->aux = NULL;
247                 fp_old->stats = NULL;
248                 fp_old->active = NULL;
249                 __bpf_prog_free(fp_old);
250         }
251
252         return fp;
253 }
254
255 void __bpf_prog_free(struct bpf_prog *fp)
256 {
257         if (fp->aux) {
258                 mutex_destroy(&fp->aux->used_maps_mutex);
259                 mutex_destroy(&fp->aux->dst_mutex);
260                 kfree(fp->aux->poke_tab);
261                 kfree(fp->aux);
262         }
263         free_percpu(fp->stats);
264         free_percpu(fp->active);
265         vfree(fp);
266 }
267
268 int bpf_prog_calc_tag(struct bpf_prog *fp)
269 {
270         const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
271         u32 raw_size = bpf_prog_tag_scratch_size(fp);
272         u32 digest[SHA1_DIGEST_WORDS];
273         u32 ws[SHA1_WORKSPACE_WORDS];
274         u32 i, bsize, psize, blocks;
275         struct bpf_insn *dst;
276         bool was_ld_map;
277         u8 *raw, *todo;
278         __be32 *result;
279         __be64 *bits;
280
281         raw = vmalloc(raw_size);
282         if (!raw)
283                 return -ENOMEM;
284
285         sha1_init(digest);
286         memset(ws, 0, sizeof(ws));
287
288         /* We need to exclude the map fds from the digest calculation
289          * since they are unstable from the user space side.
290          */
291         dst = (void *)raw;
292         for (i = 0, was_ld_map = false; i < fp->len; i++) {
293                 dst[i] = fp->insnsi[i];
294                 if (!was_ld_map &&
295                     dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
296                     (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
297                      dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
298                         was_ld_map = true;
299                         dst[i].imm = 0;
300                 } else if (was_ld_map &&
301                            dst[i].code == 0 &&
302                            dst[i].dst_reg == 0 &&
303                            dst[i].src_reg == 0 &&
304                            dst[i].off == 0) {
305                         was_ld_map = false;
306                         dst[i].imm = 0;
307                 } else {
308                         was_ld_map = false;
309                 }
310         }
311
312         psize = bpf_prog_insn_size(fp);
313         memset(&raw[psize], 0, raw_size - psize);
314         raw[psize++] = 0x80;
315
316         bsize  = round_up(psize, SHA1_BLOCK_SIZE);
317         blocks = bsize / SHA1_BLOCK_SIZE;
318         todo   = raw;
319         if (bsize - psize >= sizeof(__be64)) {
320                 bits = (__be64 *)(todo + bsize - sizeof(__be64));
321         } else {
322                 bits = (__be64 *)(todo + bsize + bits_offset);
323                 blocks++;
324         }
325         *bits = cpu_to_be64((psize - 1) << 3);
326
327         while (blocks--) {
328                 sha1_transform(digest, todo, ws);
329                 todo += SHA1_BLOCK_SIZE;
330         }
331
332         result = (__force __be32 *)digest;
333         for (i = 0; i < SHA1_DIGEST_WORDS; i++)
334                 result[i] = cpu_to_be32(digest[i]);
335         memcpy(fp->tag, result, sizeof(fp->tag));
336
337         vfree(raw);
338         return 0;
339 }
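/* Worked example (editor's sketch): for a program of 3 instructions the raw
 * insn image is psize = 3 * sizeof(struct bpf_insn) = 24 bytes. After the
 * 0x80 padding byte is appended, psize is 25, bsize = round_up(25, 64) = 64
 * and blocks = 1. Since bsize - psize = 39 >= sizeof(__be64), the length
 * field lands in the last 8 bytes of the same block and is set to
 * (psize - 1) << 3 = 24 * 8 = 192, the bit length of the unpadded image,
 * matching the standard SHA-1 padding scheme.
 */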
340
341 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
342                                 s32 end_new, s32 curr, const bool probe_pass)
343 {
344         const s64 imm_min = S32_MIN, imm_max = S32_MAX;
345         s32 delta = end_new - end_old;
346         s64 imm = insn->imm;
347
348         if (curr < pos && curr + imm + 1 >= end_old)
349                 imm += delta;
350         else if (curr >= end_new && curr + imm + 1 < end_new)
351                 imm -= delta;
352         if (imm < imm_min || imm > imm_max)
353                 return -ERANGE;
354         if (!probe_pass)
355                 insn->imm = imm;
356         return 0;
357 }
358
359 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
360                                 s32 end_new, s32 curr, const bool probe_pass)
361 {
362         const s32 off_min = S16_MIN, off_max = S16_MAX;
363         s32 delta = end_new - end_old;
364         s32 off = insn->off;
365
366         if (curr < pos && curr + off + 1 >= end_old)
367                 off += delta;
368         else if (curr >= end_new && curr + off + 1 < end_new)
369                 off -= delta;
370         if (off < off_min || off > off_max)
371                 return -ERANGE;
372         if (!probe_pass)
373                 insn->off = off;
374         return 0;
375 }
376
377 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
378                             s32 end_new, const bool probe_pass)
379 {
380         u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
381         struct bpf_insn *insn = prog->insnsi;
382         int ret = 0;
383
384         for (i = 0; i < insn_cnt; i++, insn++) {
385                 u8 code;
386
387                 /* In the probing pass we still operate on the original,
388                  * unpatched image in order to check overflows before we
389                  * do any other adjustments. Therefore skip the patchlet.
390                  */
391                 if (probe_pass && i == pos) {
392                         i = end_new;
393                         insn = prog->insnsi + end_old;
394                 }
395                 if (bpf_pseudo_func(insn)) {
396                         ret = bpf_adj_delta_to_imm(insn, pos, end_old,
397                                                    end_new, i, probe_pass);
398                         if (ret)
399                                 return ret;
400                         continue;
401                 }
402                 code = insn->code;
403                 if ((BPF_CLASS(code) != BPF_JMP &&
404                      BPF_CLASS(code) != BPF_JMP32) ||
405                     BPF_OP(code) == BPF_EXIT)
406                         continue;
407                 /* Adjust offset of jmps if we cross patch boundaries. */
408                 if (BPF_OP(code) == BPF_CALL) {
409                         if (insn->src_reg != BPF_PSEUDO_CALL)
410                                 continue;
411                         ret = bpf_adj_delta_to_imm(insn, pos, end_old,
412                                                    end_new, i, probe_pass);
413                 } else {
414                         ret = bpf_adj_delta_to_off(insn, pos, end_old,
415                                                    end_new, i, probe_pass);
416                 }
417                 if (ret)
418                         break;
419         }
420
421         return ret;
422 }
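/* Worked example (editor's sketch): bpf_patch_insn_single() below replaces the
 * single insn at position off with len new ones and then calls
 * bpf_adj_branches(prog, off, off + 1, off + len, ...), so delta = len - 1.
 * Say off = 5 and len = 3 (delta = 2). A forward jump at curr = 2 with
 * insn->off = 4 used to target insn 2 + 4 + 1 = 7, which lies behind the
 * patched region (>= end_old = 6), so its offset is bumped to 6 and it now
 * targets insn 9, which is the same original insn shifted by the two newly
 * inserted instructions.
 */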
423
424 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
425 {
426         struct bpf_line_info *linfo;
427         u32 i, nr_linfo;
428
429         nr_linfo = prog->aux->nr_linfo;
430         if (!nr_linfo || !delta)
431                 return;
432
433         linfo = prog->aux->linfo;
434
435         for (i = 0; i < nr_linfo; i++)
436                 if (off < linfo[i].insn_off)
437                         break;
438
439         /* Push all off < linfo[i].insn_off by delta */
440         for (; i < nr_linfo; i++)
441                 linfo[i].insn_off += delta;
442 }
443
444 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
445                                        const struct bpf_insn *patch, u32 len)
446 {
447         u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
448         const u32 cnt_max = S16_MAX;
449         struct bpf_prog *prog_adj;
450         int err;
451
452         /* Since our patchlet doesn't expand the image, we're done. */
453         if (insn_delta == 0) {
454                 memcpy(prog->insnsi + off, patch, sizeof(*patch));
455                 return prog;
456         }
457
458         insn_adj_cnt = prog->len + insn_delta;
459
460         /* Reject anything that would potentially let the insn->off
461          * target overflow when we have excessive program expansions.
462          * We need to probe here before we do any reallocation, since
463          * afterwards we are no longer allowed to fail.
464          */
465         if (insn_adj_cnt > cnt_max &&
466             (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
467                 return ERR_PTR(err);
468
469         /* Several new instructions need to be inserted. Make room
470          * for them. Likely, there's no need for a new allocation as
471          * the last page could have large enough tailroom.
472          */
473         prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
474                                     GFP_USER);
475         if (!prog_adj)
476                 return ERR_PTR(-ENOMEM);
477
478         prog_adj->len = insn_adj_cnt;
479
480         /* Patching happens in 3 steps:
481          *
482          * 1) Move over tail of insnsi from next instruction onwards,
483          *    so we can patch the single target insn with one or more
484          *    new ones (patching is always from 1 to n insns, n > 0).
485          * 2) Inject new instructions at the target location.
486          * 3) Adjust branch offsets if necessary.
487          */
488         insn_rest = insn_adj_cnt - off - len;
489
490         memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
491                 sizeof(*patch) * insn_rest);
492         memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
493
494         /* We are guaranteed to not fail at this point; otherwise
495          * the ship has sailed and we cannot revert to the original
496          * state. An overflow cannot happen at this point.
497          */
498         BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
499
500         bpf_adj_linfo(prog_adj, off, insn_delta);
501
502         return prog_adj;
503 }
504
505 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
506 {
507         /* Branch offsets can't overflow when program is shrinking, no need
508          * to call bpf_adj_branches(..., true) here
509          */
510         memmove(prog->insnsi + off, prog->insnsi + off + cnt,
511                 sizeof(struct bpf_insn) * (prog->len - off - cnt));
512         prog->len -= cnt;
513
514         return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
515 }
516
517 static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
518 {
519         int i;
520
521         for (i = 0; i < fp->aux->func_cnt; i++)
522                 bpf_prog_kallsyms_del(fp->aux->func[i]);
523 }
524
525 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
526 {
527         bpf_prog_kallsyms_del_subprogs(fp);
528         bpf_prog_kallsyms_del(fp);
529 }
530
531 #ifdef CONFIG_BPF_JIT
532 /* All BPF JIT sysctl knobs here. */
533 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
534 int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
535 int bpf_jit_harden   __read_mostly;
536 long bpf_jit_limit   __read_mostly;
537 long bpf_jit_limit_max __read_mostly;
538
539 static void
540 bpf_prog_ksym_set_addr(struct bpf_prog *prog)
541 {
542         WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
543
544         prog->aux->ksym.start = (unsigned long) prog->bpf_func;
545         prog->aux->ksym.end   = prog->aux->ksym.start + prog->jited_len;
546 }
547
548 static void
549 bpf_prog_ksym_set_name(struct bpf_prog *prog)
550 {
551         char *sym = prog->aux->ksym.name;
552         const char *end = sym + KSYM_NAME_LEN;
553         const struct btf_type *type;
554         const char *func_name;
555
556         BUILD_BUG_ON(sizeof("bpf_prog_") +
557                      sizeof(prog->tag) * 2 +
558                      /* name has been null terminated.
559                       * We would need +1 for the '_' preceding
560                       * the name.  However, the null character
561                       * is double counted between the name and the
562                       * sizeof("bpf_prog_") above, so we omit
563                       * the +1 here.
564                       */
565                      sizeof(prog->aux->name) > KSYM_NAME_LEN);
566
567         sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
568         sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
569
570         /* prog->aux->name will be ignored if full btf name is available */
571         if (prog->aux->func_info_cnt) {
572                 type = btf_type_by_id(prog->aux->btf,
573                                       prog->aux->func_info[prog->aux->func_idx].type_id);
574                 func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
575                 snprintf(sym, (size_t)(end - sym), "_%s", func_name);
576                 return;
577         }
578
579         if (prog->aux->name[0])
580                 snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
581         else
582                 *sym = 0;
583 }
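/* Editor's illustration: the resulting symbol has the form "bpf_prog_<tag>" or
 * "bpf_prog_<tag>_<name>", where <tag> is the 16 hex character program tag
 * and <name> comes from BTF func info when available, else from
 * prog->aux->name. A hypothetical entry as it could appear in /proc/kallsyms
 * (address, tag and name made up for illustration):
 *
 *	ffffffffc0401000 t bpf_prog_6deef7357e7b4530_sys_enter
 */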
584
585 static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
586 {
587         return container_of(n, struct bpf_ksym, tnode)->start;
588 }
589
590 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
591                                           struct latch_tree_node *b)
592 {
593         return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
594 }
595
596 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
597 {
598         unsigned long val = (unsigned long)key;
599         const struct bpf_ksym *ksym;
600
601         ksym = container_of(n, struct bpf_ksym, tnode);
602
603         if (val < ksym->start)
604                 return -1;
605         if (val >= ksym->end)
606                 return  1;
607
608         return 0;
609 }
610
611 static const struct latch_tree_ops bpf_tree_ops = {
612         .less   = bpf_tree_less,
613         .comp   = bpf_tree_comp,
614 };
615
616 static DEFINE_SPINLOCK(bpf_lock);
617 static LIST_HEAD(bpf_kallsyms);
618 static struct latch_tree_root bpf_tree __cacheline_aligned;
619
620 void bpf_ksym_add(struct bpf_ksym *ksym)
621 {
622         spin_lock_bh(&bpf_lock);
623         WARN_ON_ONCE(!list_empty(&ksym->lnode));
624         list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
625         latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
626         spin_unlock_bh(&bpf_lock);
627 }
628
629 static void __bpf_ksym_del(struct bpf_ksym *ksym)
630 {
631         if (list_empty(&ksym->lnode))
632                 return;
633
634         latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
635         list_del_rcu(&ksym->lnode);
636 }
637
638 void bpf_ksym_del(struct bpf_ksym *ksym)
639 {
640         spin_lock_bh(&bpf_lock);
641         __bpf_ksym_del(ksym);
642         spin_unlock_bh(&bpf_lock);
643 }
644
645 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
646 {
647         return fp->jited && !bpf_prog_was_classic(fp);
648 }
649
650 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
651 {
652         return list_empty(&fp->aux->ksym.lnode) ||
653                fp->aux->ksym.lnode.prev == LIST_POISON2;
654 }
655
656 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
657 {
658         if (!bpf_prog_kallsyms_candidate(fp) ||
659             !bpf_capable())
660                 return;
661
662         bpf_prog_ksym_set_addr(fp);
663         bpf_prog_ksym_set_name(fp);
664         fp->aux->ksym.prog = true;
665
666         bpf_ksym_add(&fp->aux->ksym);
667 }
668
669 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
670 {
671         if (!bpf_prog_kallsyms_candidate(fp))
672                 return;
673
674         bpf_ksym_del(&fp->aux->ksym);
675 }
676
677 static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
678 {
679         struct latch_tree_node *n;
680
681         n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
682         return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
683 }
684
685 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
686                                  unsigned long *off, char *sym)
687 {
688         struct bpf_ksym *ksym;
689         char *ret = NULL;
690
691         rcu_read_lock();
692         ksym = bpf_ksym_find(addr);
693         if (ksym) {
694                 unsigned long symbol_start = ksym->start;
695                 unsigned long symbol_end = ksym->end;
696
697                 strncpy(sym, ksym->name, KSYM_NAME_LEN);
698
699                 ret = sym;
700                 if (size)
701                         *size = symbol_end - symbol_start;
702                 if (off)
703                         *off  = addr - symbol_start;
704         }
705         rcu_read_unlock();
706
707         return ret;
708 }
709
710 bool is_bpf_text_address(unsigned long addr)
711 {
712         bool ret;
713
714         rcu_read_lock();
715         ret = bpf_ksym_find(addr) != NULL;
716         rcu_read_unlock();
717
718         return ret;
719 }
720
721 static struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
722 {
723         struct bpf_ksym *ksym = bpf_ksym_find(addr);
724
725         return ksym && ksym->prog ?
726                container_of(ksym, struct bpf_prog_aux, ksym)->prog :
727                NULL;
728 }
729
730 const struct exception_table_entry *search_bpf_extables(unsigned long addr)
731 {
732         const struct exception_table_entry *e = NULL;
733         struct bpf_prog *prog;
734
735         rcu_read_lock();
736         prog = bpf_prog_ksym_find(addr);
737         if (!prog)
738                 goto out;
739         if (!prog->aux->num_exentries)
740                 goto out;
741
742         e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
743 out:
744         rcu_read_unlock();
745         return e;
746 }
747
748 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
749                     char *sym)
750 {
751         struct bpf_ksym *ksym;
752         unsigned int it = 0;
753         int ret = -ERANGE;
754
755         if (!bpf_jit_kallsyms_enabled())
756                 return ret;
757
758         rcu_read_lock();
759         list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
760                 if (it++ != symnum)
761                         continue;
762
763                 strncpy(sym, ksym->name, KSYM_NAME_LEN);
764
765                 *value = ksym->start;
766                 *type  = BPF_SYM_ELF_TYPE;
767
768                 ret = 0;
769                 break;
770         }
771         rcu_read_unlock();
772
773         return ret;
774 }
775
776 int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
777                                 struct bpf_jit_poke_descriptor *poke)
778 {
779         struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
780         static const u32 poke_tab_max = 1024;
781         u32 slot = prog->aux->size_poke_tab;
782         u32 size = slot + 1;
783
784         if (size > poke_tab_max)
785                 return -ENOSPC;
786         if (poke->tailcall_target || poke->tailcall_target_stable ||
787             poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
788                 return -EINVAL;
789
790         switch (poke->reason) {
791         case BPF_POKE_REASON_TAIL_CALL:
792                 if (!poke->tail_call.map)
793                         return -EINVAL;
794                 break;
795         default:
796                 return -EINVAL;
797         }
798
799         tab = krealloc(tab, size * sizeof(*poke), GFP_KERNEL);
800         if (!tab)
801                 return -ENOMEM;
802
803         memcpy(&tab[slot], poke, sizeof(*poke));
804         prog->aux->size_poke_tab = size;
805         prog->aux->poke_tab = tab;
806
807         return slot;
808 }
809
810 /*
811  * BPF program pack allocator.
812  *
813  * Most BPF programs are pretty small. Allocating a whole page for each
814  * program is sometimes a waste. Many small BPF programs also add pressure
815  * to the instruction TLB. To solve this issue, we introduce a BPF program
816  * pack allocator. The prog_pack allocator uses HPAGE_PMD_SIZE pages
817  * (2MB on x86) to host BPF programs.
818  */
819 #define BPF_PROG_CHUNK_SHIFT    6
820 #define BPF_PROG_CHUNK_SIZE     (1 << BPF_PROG_CHUNK_SHIFT)
821 #define BPF_PROG_CHUNK_MASK     (~(BPF_PROG_CHUNK_SIZE - 1))
822
823 struct bpf_prog_pack {
824         struct list_head list;
825         void *ptr;
826         unsigned long bitmap[];
827 };
828
829 #define BPF_PROG_SIZE_TO_NBITS(size)    (round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)
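/* Worked example (editor's sketch): with 64-byte chunks, a 300-byte JIT image
 * needs BPF_PROG_SIZE_TO_NBITS(300) = round_up(300, 64) / 64 = 5 chunks, i.e.
 * 320 bytes of a pack, and every allocation starts on a 64-byte boundary.
 */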
830
831 static size_t bpf_prog_pack_size = -1;
832 static size_t bpf_prog_pack_mask = -1;
833
834 static int bpf_prog_chunk_count(void)
835 {
836         WARN_ON_ONCE(bpf_prog_pack_size == -1);
837         return bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE;
838 }
839
840 static DEFINE_MUTEX(pack_mutex);
841 static LIST_HEAD(pack_list);
842
843 /* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
844  * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
845  */
846 #ifdef PMD_SIZE
847 #define BPF_HPAGE_SIZE PMD_SIZE
848 #define BPF_HPAGE_MASK PMD_MASK
849 #else
850 #define BPF_HPAGE_SIZE PAGE_SIZE
851 #define BPF_HPAGE_MASK PAGE_MASK
852 #endif
853
854 static size_t select_bpf_prog_pack_size(void)
855 {
856         size_t size;
857         void *ptr;
858
859         size = BPF_HPAGE_SIZE * num_online_nodes();
860         ptr = module_alloc(size);
861
862         /* Test whether we can get huge pages. If not just use PAGE_SIZE
863          * packs.
864          */
865         if (!ptr || !is_vm_area_hugepages(ptr)) {
866                 size = PAGE_SIZE;
867                 bpf_prog_pack_mask = PAGE_MASK;
868         } else {
869                 bpf_prog_pack_mask = BPF_HPAGE_MASK;
870         }
871
872         vfree(ptr);
873         return size;
874 }
875
876 static struct bpf_prog_pack *alloc_new_pack(void)
877 {
878         struct bpf_prog_pack *pack;
879
880         pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(bpf_prog_chunk_count())),
881                        GFP_KERNEL);
882         if (!pack)
883                 return NULL;
884         pack->ptr = module_alloc(bpf_prog_pack_size);
885         if (!pack->ptr) {
886                 kfree(pack);
887                 return NULL;
888         }
889         bitmap_zero(pack->bitmap, bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE);
890         list_add_tail(&pack->list, &pack_list);
891
892         set_vm_flush_reset_perms(pack->ptr);
893         set_memory_ro((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
894         set_memory_x((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
895         return pack;
896 }
897
898 static void *bpf_prog_pack_alloc(u32 size)
899 {
900         unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
901         struct bpf_prog_pack *pack;
902         unsigned long pos;
903         void *ptr = NULL;
904
905         mutex_lock(&pack_mutex);
906         if (bpf_prog_pack_size == -1)
907                 bpf_prog_pack_size = select_bpf_prog_pack_size();
908
909         if (size > bpf_prog_pack_size) {
910                 size = round_up(size, PAGE_SIZE);
911                 ptr = module_alloc(size);
912                 if (ptr) {
913                         set_vm_flush_reset_perms(ptr);
914                         set_memory_ro((unsigned long)ptr, size / PAGE_SIZE);
915                         set_memory_x((unsigned long)ptr, size / PAGE_SIZE);
916                 }
917                 goto out;
918         }
919         list_for_each_entry(pack, &pack_list, list) {
920                 pos = bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
921                                                  nbits, 0);
922                 if (pos < bpf_prog_chunk_count())
923                         goto found_free_area;
924         }
925
926         pack = alloc_new_pack();
927         if (!pack)
928                 goto out;
929
930         pos = 0;
931
932 found_free_area:
933         bitmap_set(pack->bitmap, pos, nbits);
934         ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);
935
936 out:
937         mutex_unlock(&pack_mutex);
938         return ptr;
939 }
940
941 static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
942 {
943         struct bpf_prog_pack *pack = NULL, *tmp;
944         unsigned int nbits;
945         unsigned long pos;
946         void *pack_ptr;
947
948         mutex_lock(&pack_mutex);
949         if (hdr->size > bpf_prog_pack_size) {
950                 module_memfree(hdr);
951                 goto out;
952         }
953
954         pack_ptr = (void *)((unsigned long)hdr & bpf_prog_pack_mask);
955
956         list_for_each_entry(tmp, &pack_list, list) {
957                 if (tmp->ptr == pack_ptr) {
958                         pack = tmp;
959                         break;
960                 }
961         }
962
963         if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
964                 goto out;
965
966         nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
967         pos = ((unsigned long)hdr - (unsigned long)pack_ptr) >> BPF_PROG_CHUNK_SHIFT;
968
969         bitmap_clear(pack->bitmap, pos, nbits);
970         if (bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
971                                        bpf_prog_chunk_count(), 0) == 0) {
972                 list_del(&pack->list);
973                 module_memfree(pack->ptr);
974                 kfree(pack);
975         }
976 out:
977         mutex_unlock(&pack_mutex);
978 }
979
980 static atomic_long_t bpf_jit_current;
981
982 /* Can be overridden by an arch's JIT compiler if it has a custom,
983  * dedicated BPF backend memory area, or if neither of the two
984  * below apply.
985  */
986 u64 __weak bpf_jit_alloc_exec_limit(void)
987 {
988 #if defined(MODULES_VADDR)
989         return MODULES_END - MODULES_VADDR;
990 #else
991         return VMALLOC_END - VMALLOC_START;
992 #endif
993 }
994
995 static int __init bpf_jit_charge_init(void)
996 {
997         /* Only used as heuristic here to derive limit. */
998         bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
999         bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
1000                                             PAGE_SIZE), LONG_MAX);
1001         return 0;
1002 }
1003 pure_initcall(bpf_jit_charge_init);
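/* Worked example (editor's sketch): on a configuration whose module area spans
 * 1 GiB, bpf_jit_limit_max is 1 GiB and bpf_jit_limit defaults to a quarter of
 * that, 256 MiB (rounded up to PAGE_SIZE). Unprivileged JIT allocations that
 * would exceed the limit are rejected by bpf_jit_charge_modmem() below, while
 * bpf_capable() callers may go beyond it.
 */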
1004
1005 int bpf_jit_charge_modmem(u32 size)
1006 {
1007         if (atomic_long_add_return(size, &bpf_jit_current) > bpf_jit_limit) {
1008                 if (!bpf_capable()) {
1009                         atomic_long_sub(size, &bpf_jit_current);
1010                         return -EPERM;
1011                 }
1012         }
1013
1014         return 0;
1015 }
1016
1017 void bpf_jit_uncharge_modmem(u32 size)
1018 {
1019         atomic_long_sub(size, &bpf_jit_current);
1020 }
1021
1022 void *__weak bpf_jit_alloc_exec(unsigned long size)
1023 {
1024         return module_alloc(size);
1025 }
1026
1027 void __weak bpf_jit_free_exec(void *addr)
1028 {
1029         module_memfree(addr);
1030 }
1031
1032 struct bpf_binary_header *
1033 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1034                      unsigned int alignment,
1035                      bpf_jit_fill_hole_t bpf_fill_ill_insns)
1036 {
1037         struct bpf_binary_header *hdr;
1038         u32 size, hole, start;
1039
1040         WARN_ON_ONCE(!is_power_of_2(alignment) ||
1041                      alignment > BPF_IMAGE_ALIGNMENT);
1042
1043         /* Most BPF filters are really small, but if some of them
1044          * fill a page, allow at least 128 extra bytes to insert a
1045          * random section of illegal instructions.
1046          */
1047         size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
1048
1049         if (bpf_jit_charge_modmem(size))
1050                 return NULL;
1051         hdr = bpf_jit_alloc_exec(size);
1052         if (!hdr) {
1053                 bpf_jit_uncharge_modmem(size);
1054                 return NULL;
1055         }
1056
1057         /* Fill space with illegal/arch-dep instructions. */
1058         bpf_fill_ill_insns(hdr, size);
1059
1060         hdr->size = size;
1061         hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
1062                      PAGE_SIZE - sizeof(*hdr));
1063         start = (get_random_int() % hole) & ~(alignment - 1);
1064
1065         /* Leave a random number of instructions before BPF code. */
1066         *image_ptr = &hdr->image[start];
1067
1068         return hdr;
1069 }
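/* Worked example (editor's sketch, assuming sizeof(*hdr) == 8 and 4K pages):
 * for proglen = 2000, size = round_up(2000 + 8 + 128, 4096) = 4096 bytes is
 * charged and allocated. hole = min(4096 - 2008, 4096 - 8) = 2088, so the
 * image starts at a random, alignment-rounded offset in [0, 2088) within
 * hdr->image, leaving room for the program plus trailing illegal-insn fill.
 */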
1070
1071 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
1072 {
1073         u32 size = hdr->size;
1074
1075         bpf_jit_free_exec(hdr);
1076         bpf_jit_uncharge_modmem(size);
1077 }
1078
1079 /* Allocate jit binary from bpf_prog_pack allocator.
1080  * Since the allocated memory is RO+X, the JIT engine cannot write directly
1081  * to the memory. To solve this problem, a RW buffer is also allocated
1082  * at the same time. The JIT engine should calculate offsets based on the
1083  * RO memory address, but write the JITed program to the RW buffer. Once the
1084  * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
1085  * the JITed program to the RO memory.
1086  */
1087 struct bpf_binary_header *
1088 bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
1089                           unsigned int alignment,
1090                           struct bpf_binary_header **rw_header,
1091                           u8 **rw_image,
1092                           bpf_jit_fill_hole_t bpf_fill_ill_insns)
1093 {
1094         struct bpf_binary_header *ro_header;
1095         u32 size, hole, start;
1096
1097         WARN_ON_ONCE(!is_power_of_2(alignment) ||
1098                      alignment > BPF_IMAGE_ALIGNMENT);
1099
1100         /* add 16 bytes for a random section of illegal instructions */
1101         size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);
1102
1103         if (bpf_jit_charge_modmem(size))
1104                 return NULL;
1105         ro_header = bpf_prog_pack_alloc(size);
1106         if (!ro_header) {
1107                 bpf_jit_uncharge_modmem(size);
1108                 return NULL;
1109         }
1110
1111         *rw_header = kvmalloc(size, GFP_KERNEL);
1112         if (!*rw_header) {
1113                 bpf_arch_text_copy(&ro_header->size, &size, sizeof(size));
1114                 bpf_prog_pack_free(ro_header);
1115                 bpf_jit_uncharge_modmem(size);
1116                 return NULL;
1117         }
1118
1119         /* Fill space with illegal/arch-dep instructions. */
1120         bpf_fill_ill_insns(*rw_header, size);
1121         (*rw_header)->size = size;
1122
1123         hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
1124                      BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
1125         start = (get_random_int() % hole) & ~(alignment - 1);
1126
1127         *image_ptr = &ro_header->image[start];
1128         *rw_image = &(*rw_header)->image[start];
1129
1130         return ro_header;
1131 }
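/* Typical call sequence in an arch JIT (editor's sketch, simplified and not a
 * verbatim copy of any arch; error handling omitted):
 *
 *	ro_header = bpf_jit_binary_pack_alloc(proglen, &ro_image, align,
 *					      &rw_header, &rw_image,
 *					      jit_fill_hole);
 *	... emit the program into rw_image, but compute all addresses
 *	    (branch targets, prog->bpf_func) against ro_image ...
 *	bpf_jit_binary_pack_finalize(prog, ro_header, rw_header);
 *	prog->bpf_func = (void *)ro_image;
 */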
1132
1133 /* Copy JITed text from rw_header to its final location, the ro_header. */
1134 int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
1135                                  struct bpf_binary_header *ro_header,
1136                                  struct bpf_binary_header *rw_header)
1137 {
1138         void *ptr;
1139
1140         ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);
1141
1142         kvfree(rw_header);
1143
1144         if (IS_ERR(ptr)) {
1145                 bpf_prog_pack_free(ro_header);
1146                 return PTR_ERR(ptr);
1147         }
1148         prog->aux->use_bpf_prog_pack = true;
1149         return 0;
1150 }
1151
1152 /* bpf_jit_binary_pack_free is called in two different scenarios:
1153  *   1) when the program is freed after a successful JIT;
1154  *   2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
1155  * For case 2), we need to free both the RO memory and the RW buffer.
1156  *
1157  * bpf_jit_binary_pack_free requires proper ro_header->size. However,
1158  * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
1159  * must be set with either bpf_jit_binary_pack_finalize (normal path) or
1160  * bpf_arch_text_copy (when jit fails).
1161  */
1162 void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
1163                               struct bpf_binary_header *rw_header)
1164 {
1165         u32 size = ro_header->size;
1166
1167         bpf_prog_pack_free(ro_header);
1168         kvfree(rw_header);
1169         bpf_jit_uncharge_modmem(size);
1170 }
1171
1172 static inline struct bpf_binary_header *
1173 bpf_jit_binary_hdr(const struct bpf_prog *fp)
1174 {
1175         unsigned long real_start = (unsigned long)fp->bpf_func;
1176         unsigned long addr;
1177
1178         if (fp->aux->use_bpf_prog_pack)
1179                 addr = real_start & BPF_PROG_CHUNK_MASK;
1180         else
1181                 addr = real_start & PAGE_MASK;
1182
1183         return (void *)addr;
1184 }
1185
1186 /* This symbol is only overridden by archs that have different
1187  * requirements than the usual eBPF JITs, f.e. when they only
1188  * implement cBPF JIT, do not set images read-only, etc.
1189  */
1190 void __weak bpf_jit_free(struct bpf_prog *fp)
1191 {
1192         if (fp->jited) {
1193                 struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
1194
1195                 if (fp->aux->use_bpf_prog_pack)
1196                         bpf_jit_binary_pack_free(hdr, NULL /* rw_buffer */);
1197                 else
1198                         bpf_jit_binary_free(hdr);
1199
1200                 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
1201         }
1202
1203         bpf_prog_unlock_free(fp);
1204 }
1205
1206 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1207                           const struct bpf_insn *insn, bool extra_pass,
1208                           u64 *func_addr, bool *func_addr_fixed)
1209 {
1210         s16 off = insn->off;
1211         s32 imm = insn->imm;
1212         u8 *addr;
1213
1214         *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
1215         if (!*func_addr_fixed) {
1216                 /* Place-holder address till the last pass has collected
1217                  * all addresses for JITed subprograms in which case we
1218                  * can pick them up from prog->aux.
1219                  */
1220                 if (!extra_pass)
1221                         addr = NULL;
1222                 else if (prog->aux->func &&
1223                          off >= 0 && off < prog->aux->func_cnt)
1224                         addr = (u8 *)prog->aux->func[off]->bpf_func;
1225                 else
1226                         return -EINVAL;
1227         } else {
1228                 /* Address of a BPF helper call. Since part of the core
1229                  * kernel, it's always at a fixed location. __bpf_call_base
1230                  * and the helper with imm relative to it are both in core
1231                  * kernel.
1232                  */
1233                 addr = (u8 *)__bpf_call_base + imm;
1234         }
1235
1236         *func_addr = (unsigned long)addr;
1237         return 0;
1238 }
1239
1240 static int bpf_jit_blind_insn(const struct bpf_insn *from,
1241                               const struct bpf_insn *aux,
1242                               struct bpf_insn *to_buff,
1243                               bool emit_zext)
1244 {
1245         struct bpf_insn *to = to_buff;
1246         u32 imm_rnd = get_random_int();
1247         s16 off;
1248
1249         BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
1250         BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
1251
1252         /* Constraints on AX register:
1253          *
1254          * AX register is inaccessible from user space. It is mapped in
1255          * all JITs, and used here for constant blinding rewrites. It is
1256          * typically "stateless" meaning its contents are only valid within
1257          * the executed instruction, but not across several instructions.
1258          * There are a few exceptions however which are further detailed
1259          * below.
1260          *
1261          * Constant blinding is only used by JITs, not in the interpreter.
1262          * The interpreter uses AX in some occasions as a local temporary
1263          * register e.g. in DIV or MOD instructions.
1264          *
1265          * In restricted circumstances, the verifier can also use the AX
1266          * register for rewrites as long as they do not interfere with
1267          * the above cases!
1268          */
1269         if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
1270                 goto out;
1271
1272         if (from->imm == 0 &&
1273             (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
1274              from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
1275                 *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
1276                 goto out;
1277         }
1278
1279         switch (from->code) {
1280         case BPF_ALU | BPF_ADD | BPF_K:
1281         case BPF_ALU | BPF_SUB | BPF_K:
1282         case BPF_ALU | BPF_AND | BPF_K:
1283         case BPF_ALU | BPF_OR  | BPF_K:
1284         case BPF_ALU | BPF_XOR | BPF_K:
1285         case BPF_ALU | BPF_MUL | BPF_K:
1286         case BPF_ALU | BPF_MOV | BPF_K:
1287         case BPF_ALU | BPF_DIV | BPF_K:
1288         case BPF_ALU | BPF_MOD | BPF_K:
1289                 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1290                 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1291                 *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
1292                 break;
1293
1294         case BPF_ALU64 | BPF_ADD | BPF_K:
1295         case BPF_ALU64 | BPF_SUB | BPF_K:
1296         case BPF_ALU64 | BPF_AND | BPF_K:
1297         case BPF_ALU64 | BPF_OR  | BPF_K:
1298         case BPF_ALU64 | BPF_XOR | BPF_K:
1299         case BPF_ALU64 | BPF_MUL | BPF_K:
1300         case BPF_ALU64 | BPF_MOV | BPF_K:
1301         case BPF_ALU64 | BPF_DIV | BPF_K:
1302         case BPF_ALU64 | BPF_MOD | BPF_K:
1303                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1304                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1305                 *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
1306                 break;
1307
1308         case BPF_JMP | BPF_JEQ  | BPF_K:
1309         case BPF_JMP | BPF_JNE  | BPF_K:
1310         case BPF_JMP | BPF_JGT  | BPF_K:
1311         case BPF_JMP | BPF_JLT  | BPF_K:
1312         case BPF_JMP | BPF_JGE  | BPF_K:
1313         case BPF_JMP | BPF_JLE  | BPF_K:
1314         case BPF_JMP | BPF_JSGT | BPF_K:
1315         case BPF_JMP | BPF_JSLT | BPF_K:
1316         case BPF_JMP | BPF_JSGE | BPF_K:
1317         case BPF_JMP | BPF_JSLE | BPF_K:
1318         case BPF_JMP | BPF_JSET | BPF_K:
1319                 /* Accommodate for extra offset in case of a backjump. */
1320                 off = from->off;
1321                 if (off < 0)
1322                         off -= 2;
1323                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1324                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1325                 *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
1326                 break;
1327
1328         case BPF_JMP32 | BPF_JEQ  | BPF_K:
1329         case BPF_JMP32 | BPF_JNE  | BPF_K:
1330         case BPF_JMP32 | BPF_JGT  | BPF_K:
1331         case BPF_JMP32 | BPF_JLT  | BPF_K:
1332         case BPF_JMP32 | BPF_JGE  | BPF_K:
1333         case BPF_JMP32 | BPF_JLE  | BPF_K:
1334         case BPF_JMP32 | BPF_JSGT | BPF_K:
1335         case BPF_JMP32 | BPF_JSLT | BPF_K:
1336         case BPF_JMP32 | BPF_JSGE | BPF_K:
1337         case BPF_JMP32 | BPF_JSLE | BPF_K:
1338         case BPF_JMP32 | BPF_JSET | BPF_K:
1339                 /* Accommodate for extra offset in case of a backjump. */
1340                 off = from->off;
1341                 if (off < 0)
1342                         off -= 2;
1343                 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1344                 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1345                 *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1346                                       off);
1347                 break;
1348
1349         case BPF_LD | BPF_IMM | BPF_DW:
1350                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
1351                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1352                 *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
1353                 *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
1354                 break;
1355         case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
1356                 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
1357                 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1358                 if (emit_zext)
1359                         *to++ = BPF_ZEXT_REG(BPF_REG_AX);
1360                 *to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
1361                 break;
1362
1363         case BPF_ST | BPF_MEM | BPF_DW:
1364         case BPF_ST | BPF_MEM | BPF_W:
1365         case BPF_ST | BPF_MEM | BPF_H:
1366         case BPF_ST | BPF_MEM | BPF_B:
1367                 *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1368                 *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1369                 *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
1370                 break;
1371         }
1372 out:
1373         return to - to_buff;
1374 }
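/* Worked example (editor's sketch): with blinding enabled, a single
 * immediate-based insn such as
 *
 *	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1234)
 *
 * is rewritten into three insns using a fresh random imm_rnd:
 *
 *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ 0x1234)
 *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd)
 *	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_AX)
 *
 * so the original constant never appears verbatim in the JITed image and
 * cannot be used to smuggle attacker-chosen opcodes in via immediates.
 */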
1375
1376 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
1377                                               gfp_t gfp_extra_flags)
1378 {
1379         gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
1380         struct bpf_prog *fp;
1381
1382         fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
1383         if (fp != NULL) {
1384                 /* aux->prog still points to the fp_other one, so
1385                  * when promoting the clone to the real program,
1386                  * this still needs to be adapted.
1387                  */
1388                 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
1389         }
1390
1391         return fp;
1392 }
1393
1394 static void bpf_prog_clone_free(struct bpf_prog *fp)
1395 {
1396         /* aux was stolen by the other clone, so we cannot free
1397          * it from this path! It will be freed eventually by the
1398          * other program on release.
1399          *
1400          * At this point, we don't need a deferred release since
1401          * clone is guaranteed to not be locked.
1402          */
1403         fp->aux = NULL;
1404         fp->stats = NULL;
1405         fp->active = NULL;
1406         __bpf_prog_free(fp);
1407 }
1408
1409 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
1410 {
1411         /* We have to repoint aux->prog to self, as we don't
1412          * know whether fp here is the clone or the original.
1413          */
1414         fp->aux->prog = fp;
1415         bpf_prog_clone_free(fp_other);
1416 }
1417
1418 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
1419 {
1420         struct bpf_insn insn_buff[16], aux[2];
1421         struct bpf_prog *clone, *tmp;
1422         int insn_delta, insn_cnt;
1423         struct bpf_insn *insn;
1424         int i, rewritten;
1425
1426         if (!prog->blinding_requested || prog->blinded)
1427                 return prog;
1428
1429         clone = bpf_prog_clone_create(prog, GFP_USER);
1430         if (!clone)
1431                 return ERR_PTR(-ENOMEM);
1432
1433         insn_cnt = clone->len;
1434         insn = clone->insnsi;
1435
1436         for (i = 0; i < insn_cnt; i++, insn++) {
1437                 /* We temporarily need to hold the original ld64 insn
1438                  * so that we can still access the first part in the
1439                  * second blinding run.
1440                  */
1441                 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
1442                     insn[1].code == 0)
1443                         memcpy(aux, insn, sizeof(aux));
1444
1445                 rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1446                                                 clone->aux->verifier_zext);
1447                 if (!rewritten)
1448                         continue;
1449
1450                 tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
1451                 if (IS_ERR(tmp)) {
1452                         /* Patching may have repointed aux->prog during
1453                          * realloc from the original one, so we need to
1454                          * fix it up here on error.
1455                          */
1456                         bpf_jit_prog_release_other(prog, clone);
1457                         return tmp;
1458                 }
1459
1460                 clone = tmp;
1461                 insn_delta = rewritten - 1;
1462
1463                 /* Walk new program and skip insns we just inserted. */
1464                 insn = clone->insnsi + i + insn_delta;
1465                 insn_cnt += insn_delta;
1466                 i        += insn_delta;
1467         }
1468
1469         clone->blinded = 1;
1470         return clone;
1471 }
1472 #endif /* CONFIG_BPF_JIT */
1473
1474 /* Base function for offset calculation. Needs to go into .text section,
1475  * therefore keeping it non-static as well; will also be used by JITs
1476  * anyway later on, so do not let the compiler omit it. This also needs
1477  * to go into kallsyms for correlation from e.g. bpftool, so naming
1478  * must not change.
1479  */
1480 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1481 {
1482         return 0;
1483 }
1484 EXPORT_SYMBOL_GPL(__bpf_call_base);
1485
1486 /* All UAPI available opcodes. */
1487 #define BPF_INSN_MAP(INSN_2, INSN_3)            \
1488         /* 32 bit ALU operations. */            \
1489         /*   Register based. */                 \
1490         INSN_3(ALU, ADD,  X),                   \
1491         INSN_3(ALU, SUB,  X),                   \
1492         INSN_3(ALU, AND,  X),                   \
1493         INSN_3(ALU, OR,   X),                   \
1494         INSN_3(ALU, LSH,  X),                   \
1495         INSN_3(ALU, RSH,  X),                   \
1496         INSN_3(ALU, XOR,  X),                   \
1497         INSN_3(ALU, MUL,  X),                   \
1498         INSN_3(ALU, MOV,  X),                   \
1499         INSN_3(ALU, ARSH, X),                   \
1500         INSN_3(ALU, DIV,  X),                   \
1501         INSN_3(ALU, MOD,  X),                   \
1502         INSN_2(ALU, NEG),                       \
1503         INSN_3(ALU, END, TO_BE),                \
1504         INSN_3(ALU, END, TO_LE),                \
1505         /*   Immediate based. */                \
1506         INSN_3(ALU, ADD,  K),                   \
1507         INSN_3(ALU, SUB,  K),                   \
1508         INSN_3(ALU, AND,  K),                   \
1509         INSN_3(ALU, OR,   K),                   \
1510         INSN_3(ALU, LSH,  K),                   \
1511         INSN_3(ALU, RSH,  K),                   \
1512         INSN_3(ALU, XOR,  K),                   \
1513         INSN_3(ALU, MUL,  K),                   \
1514         INSN_3(ALU, MOV,  K),                   \
1515         INSN_3(ALU, ARSH, K),                   \
1516         INSN_3(ALU, DIV,  K),                   \
1517         INSN_3(ALU, MOD,  K),                   \
1518         /* 64 bit ALU operations. */            \
1519         /*   Register based. */                 \
1520         INSN_3(ALU64, ADD,  X),                 \
1521         INSN_3(ALU64, SUB,  X),                 \
1522         INSN_3(ALU64, AND,  X),                 \
1523         INSN_3(ALU64, OR,   X),                 \
1524         INSN_3(ALU64, LSH,  X),                 \
1525         INSN_3(ALU64, RSH,  X),                 \
1526         INSN_3(ALU64, XOR,  X),                 \
1527         INSN_3(ALU64, MUL,  X),                 \
1528         INSN_3(ALU64, MOV,  X),                 \
1529         INSN_3(ALU64, ARSH, X),                 \
1530         INSN_3(ALU64, DIV,  X),                 \
1531         INSN_3(ALU64, MOD,  X),                 \
1532         INSN_2(ALU64, NEG),                     \
1533         /*   Immediate based. */                \
1534         INSN_3(ALU64, ADD,  K),                 \
1535         INSN_3(ALU64, SUB,  K),                 \
1536         INSN_3(ALU64, AND,  K),                 \
1537         INSN_3(ALU64, OR,   K),                 \
1538         INSN_3(ALU64, LSH,  K),                 \
1539         INSN_3(ALU64, RSH,  K),                 \
1540         INSN_3(ALU64, XOR,  K),                 \
1541         INSN_3(ALU64, MUL,  K),                 \
1542         INSN_3(ALU64, MOV,  K),                 \
1543         INSN_3(ALU64, ARSH, K),                 \
1544         INSN_3(ALU64, DIV,  K),                 \
1545         INSN_3(ALU64, MOD,  K),                 \
1546         /* Call instruction. */                 \
1547         INSN_2(JMP, CALL),                      \
1548         /* Exit instruction. */                 \
1549         INSN_2(JMP, EXIT),                      \
1550         /* 32-bit Jump instructions. */         \
1551         /*   Register based. */                 \
1552         INSN_3(JMP32, JEQ,  X),                 \
1553         INSN_3(JMP32, JNE,  X),                 \
1554         INSN_3(JMP32, JGT,  X),                 \
1555         INSN_3(JMP32, JLT,  X),                 \
1556         INSN_3(JMP32, JGE,  X),                 \
1557         INSN_3(JMP32, JLE,  X),                 \
1558         INSN_3(JMP32, JSGT, X),                 \
1559         INSN_3(JMP32, JSLT, X),                 \
1560         INSN_3(JMP32, JSGE, X),                 \
1561         INSN_3(JMP32, JSLE, X),                 \
1562         INSN_3(JMP32, JSET, X),                 \
1563         /*   Immediate based. */                \
1564         INSN_3(JMP32, JEQ,  K),                 \
1565         INSN_3(JMP32, JNE,  K),                 \
1566         INSN_3(JMP32, JGT,  K),                 \
1567         INSN_3(JMP32, JLT,  K),                 \
1568         INSN_3(JMP32, JGE,  K),                 \
1569         INSN_3(JMP32, JLE,  K),                 \
1570         INSN_3(JMP32, JSGT, K),                 \
1571         INSN_3(JMP32, JSLT, K),                 \
1572         INSN_3(JMP32, JSGE, K),                 \
1573         INSN_3(JMP32, JSLE, K),                 \
1574         INSN_3(JMP32, JSET, K),                 \
1575         /* Jump instructions. */                \
1576         /*   Register based. */                 \
1577         INSN_3(JMP, JEQ,  X),                   \
1578         INSN_3(JMP, JNE,  X),                   \
1579         INSN_3(JMP, JGT,  X),                   \
1580         INSN_3(JMP, JLT,  X),                   \
1581         INSN_3(JMP, JGE,  X),                   \
1582         INSN_3(JMP, JLE,  X),                   \
1583         INSN_3(JMP, JSGT, X),                   \
1584         INSN_3(JMP, JSLT, X),                   \
1585         INSN_3(JMP, JSGE, X),                   \
1586         INSN_3(JMP, JSLE, X),                   \
1587         INSN_3(JMP, JSET, X),                   \
1588         /*   Immediate based. */                \
1589         INSN_3(JMP, JEQ,  K),                   \
1590         INSN_3(JMP, JNE,  K),                   \
1591         INSN_3(JMP, JGT,  K),                   \
1592         INSN_3(JMP, JLT,  K),                   \
1593         INSN_3(JMP, JGE,  K),                   \
1594         INSN_3(JMP, JLE,  K),                   \
1595         INSN_3(JMP, JSGT, K),                   \
1596         INSN_3(JMP, JSLT, K),                   \
1597         INSN_3(JMP, JSGE, K),                   \
1598         INSN_3(JMP, JSLE, K),                   \
1599         INSN_3(JMP, JSET, K),                   \
1600         INSN_2(JMP, JA),                        \
1601         /* Store instructions. */               \
1602         /*   Register based. */                 \
1603         INSN_3(STX, MEM,  B),                   \
1604         INSN_3(STX, MEM,  H),                   \
1605         INSN_3(STX, MEM,  W),                   \
1606         INSN_3(STX, MEM,  DW),                  \
1607         INSN_3(STX, ATOMIC, W),                 \
1608         INSN_3(STX, ATOMIC, DW),                \
1609         /*   Immediate based. */                \
1610         INSN_3(ST, MEM, B),                     \
1611         INSN_3(ST, MEM, H),                     \
1612         INSN_3(ST, MEM, W),                     \
1613         INSN_3(ST, MEM, DW),                    \
1614         /* Load instructions. */                \
1615         /*   Register based. */                 \
1616         INSN_3(LDX, MEM, B),                    \
1617         INSN_3(LDX, MEM, H),                    \
1618         INSN_3(LDX, MEM, W),                    \
1619         INSN_3(LDX, MEM, DW),                   \
1620         /*   Immediate based. */                \
1621         INSN_3(LD, IMM, DW)
1622
1623 bool bpf_opcode_in_insntable(u8 code)
1624 {
1625 #define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
1626 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
1627         static const bool public_insntable[256] = {
1628                 [0 ... 255] = false,
1629                 /* Now overwrite non-defaults ... */
1630                 BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1631                 /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1632                 [BPF_LD | BPF_ABS | BPF_B] = true,
1633                 [BPF_LD | BPF_ABS | BPF_H] = true,
1634                 [BPF_LD | BPF_ABS | BPF_W] = true,
1635                 [BPF_LD | BPF_IND | BPF_B] = true,
1636                 [BPF_LD | BPF_IND | BPF_H] = true,
1637                 [BPF_LD | BPF_IND | BPF_W] = true,
1638         };
1639 #undef BPF_INSN_3_TBL
1640 #undef BPF_INSN_2_TBL
1641         return public_insntable[code];
1642 }
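/* Illustration: how the BPF_INSN_MAP expansion above works. Each
 * INSN_2()/INSN_3() entry is pasted into a designated-initializer
 * entry indexed by the opcode byte. A minimal sketch of the
 * preprocessed result:
 */
#if 0
static const bool public_insntable[256] = {
	[0 ... 255] = false,
	[BPF_ALU | BPF_ADD | BPF_X] = true,	/* from INSN_3(ALU, ADD, X) */
	[BPF_ALU | BPF_NEG] = true,		/* from INSN_2(ALU, NEG) */
	/* ... one entry per opcode listed in BPF_INSN_MAP ... */
};
#endif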
1643
1644 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1645 u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
1646 {
1647         memset(dst, 0, size);
1648         return -EFAULT;
1649 }
1650
1651 /**
1652  *      ___bpf_prog_run - run eBPF program on a given context
1653  *      @regs: the array of MAX_BPF_EXT_REG eBPF pseudo-registers
1654  *      @insn: the array of eBPF instructions
1655  *
1656  * Decode and execute eBPF instructions.
1657  *
1658  * Return: whatever value is in %BPF_R0 at program exit
1659  */
1660 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1661 {
1662 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
1663 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1664         static const void * const jumptable[256] __annotate_jump_table = {
1665                 [0 ... 255] = &&default_label,
1666                 /* Now overwrite non-defaults ... */
1667                 BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
1668                 /* Non-UAPI available opcodes. */
1669                 [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
1670                 [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1671                 [BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
1672                 [BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
1673                 [BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
1674                 [BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
1675                 [BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
1676         };
1677 #undef BPF_INSN_3_LBL
1678 #undef BPF_INSN_2_LBL
1679         u32 tail_call_cnt = 0;
1680
1681 #define CONT     ({ insn++; goto select_insn; })
1682 #define CONT_JMP ({ insn++; goto select_insn; })
1683
1684 select_insn:
1685         goto *jumptable[insn->code];
1686
1687         /* Explicitly mask the register-based shift amounts with 63 or 31
1688          * to avoid undefined behavior. Normally this won't affect the
1689          * generated code; for example, on native 64-bit archs such as
1690          * x86-64 or arm64, the compiler optimizes the AND away for the
1691          * interpreter. For JITs, each backend compiles the BPF shift
1692          * operations to machine instructions which produce
1693          * implementation-defined results in such a case; the resulting
1694          * contents of the register may be arbitrary, but program behaviour
1695          * as a whole remains defined. In other words, for JIT backends,
1696          * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
1697          */
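	/* Illustration: the undefined behavior that the masking avoids.
	 * Generic C, not kernel code:
	 */
#if 0
	u64 shl(u64 x, u64 s)
	{
		u64 a = x << s;		/* undefined behavior in C when s >= 64 */
		u64 b = x << (s & 63);	/* always defined; s == 64 acts as s == 0 */
		return a ^ b;
	}
#endif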
1698         /* ALU (shifts) */
1699 #define SHT(OPCODE, OP)                                 \
1700         ALU64_##OPCODE##_X:                             \
1701                 DST = DST OP (SRC & 63);                \
1702                 CONT;                                   \
1703         ALU_##OPCODE##_X:                               \
1704                 DST = (u32) DST OP ((u32) SRC & 31);    \
1705                 CONT;                                   \
1706         ALU64_##OPCODE##_K:                             \
1707                 DST = DST OP IMM;                       \
1708                 CONT;                                   \
1709         ALU_##OPCODE##_K:                               \
1710                 DST = (u32) DST OP (u32) IMM;           \
1711                 CONT;
1712         /* ALU (rest) */
1713 #define ALU(OPCODE, OP)                                 \
1714         ALU64_##OPCODE##_X:                             \
1715                 DST = DST OP SRC;                       \
1716                 CONT;                                   \
1717         ALU_##OPCODE##_X:                               \
1718                 DST = (u32) DST OP (u32) SRC;           \
1719                 CONT;                                   \
1720         ALU64_##OPCODE##_K:                             \
1721                 DST = DST OP IMM;                       \
1722                 CONT;                                   \
1723         ALU_##OPCODE##_K:                               \
1724                 DST = (u32) DST OP (u32) IMM;           \
1725                 CONT;
1726         ALU(ADD,  +)
1727         ALU(SUB,  -)
1728         ALU(AND,  &)
1729         ALU(OR,   |)
1730         ALU(XOR,  ^)
1731         ALU(MUL,  *)
1732         SHT(LSH, <<)
1733         SHT(RSH, >>)
1734 #undef SHT
1735 #undef ALU
1736         ALU_NEG:
1737                 DST = (u32) -DST;
1738                 CONT;
1739         ALU64_NEG:
1740                 DST = -DST;
1741                 CONT;
1742         ALU_MOV_X:
1743                 DST = (u32) SRC;
1744                 CONT;
1745         ALU_MOV_K:
1746                 DST = (u32) IMM;
1747                 CONT;
1748         ALU64_MOV_X:
1749                 DST = SRC;
1750                 CONT;
1751         ALU64_MOV_K:
1752                 DST = IMM;
1753                 CONT;
1754         LD_IMM_DW:
1755                 DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
1756                 insn++;
1757                 CONT;
1758         ALU_ARSH_X:
1759                 DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
1760                 CONT;
1761         ALU_ARSH_K:
1762                 DST = (u64) (u32) (((s32) DST) >> IMM);
1763                 CONT;
1764         ALU64_ARSH_X:
1765                 (*(s64 *) &DST) >>= (SRC & 63);
1766                 CONT;
1767         ALU64_ARSH_K:
1768                 (*(s64 *) &DST) >>= IMM;
1769                 CONT;
1770         ALU64_MOD_X:
1771                 div64_u64_rem(DST, SRC, &AX);
1772                 DST = AX;
1773                 CONT;
1774         ALU_MOD_X:
1775                 AX = (u32) DST;
1776                 DST = do_div(AX, (u32) SRC);
1777                 CONT;
1778         ALU64_MOD_K:
1779                 div64_u64_rem(DST, IMM, &AX);
1780                 DST = AX;
1781                 CONT;
1782         ALU_MOD_K:
1783                 AX = (u32) DST;
1784                 DST = do_div(AX, (u32) IMM);
1785                 CONT;
1786         ALU64_DIV_X:
1787                 DST = div64_u64(DST, SRC);
1788                 CONT;
1789         ALU_DIV_X:
1790                 AX = (u32) DST;
1791                 do_div(AX, (u32) SRC);
1792                 DST = (u32) AX;
1793                 CONT;
1794         ALU64_DIV_K:
1795                 DST = div64_u64(DST, IMM);
1796                 CONT;
1797         ALU_DIV_K:
1798                 AX = (u32) DST;
1799                 do_div(AX, (u32) IMM);
1800                 DST = (u32) AX;
1801                 CONT;
1802         ALU_END_TO_BE:
1803                 switch (IMM) {
1804                 case 16:
1805                         DST = (__force u16) cpu_to_be16(DST);
1806                         break;
1807                 case 32:
1808                         DST = (__force u32) cpu_to_be32(DST);
1809                         break;
1810                 case 64:
1811                         DST = (__force u64) cpu_to_be64(DST);
1812                         break;
1813                 }
1814                 CONT;
1815         ALU_END_TO_LE:
1816                 switch (IMM) {
1817                 case 16:
1818                         DST = (__force u16) cpu_to_le16(DST);
1819                         break;
1820                 case 32:
1821                         DST = (__force u32) cpu_to_le32(DST);
1822                         break;
1823                 case 64:
1824                         DST = (__force u64) cpu_to_le64(DST);
1825                         break;
1826                 }
1827                 CONT;
1828
1829         /* CALL */
1830         JMP_CALL:
1831                 /* Function call scratches BPF_R1-BPF_R5 registers,
1832                  * preserves BPF_R6-BPF_R9, and stores return value
1833                  * into BPF_R0.
1834                  */
1835                 BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
1836                                                        BPF_R4, BPF_R5);
1837                 CONT;
1838
1839         JMP_CALL_ARGS:
1840                 BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
1841                                                             BPF_R3, BPF_R4,
1842                                                             BPF_R5,
1843                                                             insn + insn->off + 1);
1844                 CONT;
1845
1846         JMP_TAIL_CALL: {
1847                 struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1848                 struct bpf_array *array = container_of(map, struct bpf_array, map);
1849                 struct bpf_prog *prog;
1850                 u32 index = BPF_R3;
1851
1852                 if (unlikely(index >= array->map.max_entries))
1853                         goto out;
1854
1855                 if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
1856                         goto out;
1857
1858                 tail_call_cnt++;
1859
1860                 prog = READ_ONCE(array->ptrs[index]);
1861                 if (!prog)
1862                         goto out;
1863
1864                 /* ARG1 at this point is guaranteed to point to CTX from
1865                  * the verifier side because the tail call is handled like
1866                  * a helper, that is, bpf_tail_call_proto, where arg1_type
1867                  * is ARG_PTR_TO_CTX.
1868                  */
1869                 insn = prog->insnsi;
1870                 goto select_insn;
1871 out:
1872                 CONT;
1873         }
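	/* Illustration: what drives the JMP_TAIL_CALL path from the program
	 * side. A sketch in BPF program C (libbpf conventions assumed,
	 * headers omitted; map and section names are hypothetical):
	 */
#if 0
	struct {
		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
		__uint(max_entries, 8);
		__uint(key_size, sizeof(__u32));
		__uint(value_size, sizeof(__u32));
	} jmp_table SEC(".maps");

	SEC("xdp")
	int dispatcher(struct xdp_md *ctx)
	{
		/* R1 = ctx, R2 = &jmp_table, R3 = index: exactly the
		 * registers JMP_TAIL_CALL above consumes. Returns only
		 * if the tail call fails (bad index, empty slot, or the
		 * MAX_TAIL_CALL_CNT limit was hit).
		 */
		bpf_tail_call(ctx, &jmp_table, 0);
		return XDP_PASS;
	}
#endif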
1874         JMP_JA:
1875                 insn += insn->off;
1876                 CONT;
1877         JMP_EXIT:
1878                 return BPF_R0;
1879         /* JMP */
1880 #define COND_JMP(SIGN, OPCODE, CMP_OP)                          \
1881         JMP_##OPCODE##_X:                                       \
1882                 if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {     \
1883                         insn += insn->off;                      \
1884                         CONT_JMP;                               \
1885                 }                                               \
1886                 CONT;                                           \
1887         JMP32_##OPCODE##_X:                                     \
1888                 if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {     \
1889                         insn += insn->off;                      \
1890                         CONT_JMP;                               \
1891                 }                                               \
1892                 CONT;                                           \
1893         JMP_##OPCODE##_K:                                       \
1894                 if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {     \
1895                         insn += insn->off;                      \
1896                         CONT_JMP;                               \
1897                 }                                               \
1898                 CONT;                                           \
1899         JMP32_##OPCODE##_K:                                     \
1900                 if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {     \
1901                         insn += insn->off;                      \
1902                         CONT_JMP;                               \
1903                 }                                               \
1904                 CONT;
1905         COND_JMP(u, JEQ, ==)
1906         COND_JMP(u, JNE, !=)
1907         COND_JMP(u, JGT, >)
1908         COND_JMP(u, JLT, <)
1909         COND_JMP(u, JGE, >=)
1910         COND_JMP(u, JLE, <=)
1911         COND_JMP(u, JSET, &)
1912         COND_JMP(s, JSGT, >)
1913         COND_JMP(s, JSLT, <)
1914         COND_JMP(s, JSGE, >=)
1915         COND_JMP(s, JSLE, <=)
1916 #undef COND_JMP
1917         /* ST, STX and LDX */
1918         ST_NOSPEC:
1919                 /* Speculation barrier for mitigating Speculative Store Bypass.
1920                  * In case of arm64, we rely on the firmware mitigation as
1921                  * controlled via the ssbd kernel parameter. Whenever the
1922                  * mitigation is enabled, it works for all of the kernel code
1923                  * with no need to provide any additional instructions here.
1924                  * In case of x86, we use 'lfence' insn for mitigation. We
1925                  * reuse preexisting logic from Spectre v1 mitigation that
1926                  * happens to produce the required code on x86 for v4 as well.
1927                  */
1928 #ifdef CONFIG_X86
1929                 barrier_nospec();
1930 #endif
1931                 CONT;
1932 #define LDST(SIZEOP, SIZE)                                              \
1933         STX_MEM_##SIZEOP:                                               \
1934                 *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
1935                 CONT;                                                   \
1936         ST_MEM_##SIZEOP:                                                \
1937                 *(SIZE *)(unsigned long) (DST + insn->off) = IMM;       \
1938                 CONT;                                                   \
1939         LDX_MEM_##SIZEOP:                                               \
1940                 DST = *(SIZE *)(unsigned long) (SRC + insn->off);       \
1941                 CONT;
1942
1943         LDST(B,   u8)
1944         LDST(H,  u16)
1945         LDST(W,  u32)
1946         LDST(DW, u64)
1947 #undef LDST
1948 #define LDX_PROBE(SIZEOP, SIZE)                                                 \
1949         LDX_PROBE_MEM_##SIZEOP:                                                 \
1950                 bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) (SRC + insn->off));      \
1951                 CONT;
1952         LDX_PROBE(B,  1)
1953         LDX_PROBE(H,  2)
1954         LDX_PROBE(W,  4)
1955         LDX_PROBE(DW, 8)
1956 #undef LDX_PROBE
1957
1958 #define ATOMIC_ALU_OP(BOP, KOP)                                         \
1959                 case BOP:                                               \
1960                         if (BPF_SIZE(insn->code) == BPF_W)              \
1961                                 atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
1962                                              (DST + insn->off));        \
1963                         else                                            \
1964                                 atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
1965                                                (DST + insn->off));      \
1966                         break;                                          \
1967                 case BOP | BPF_FETCH:                                   \
1968                         if (BPF_SIZE(insn->code) == BPF_W)              \
1969                                 SRC = (u32) atomic_fetch_##KOP(         \
1970                                         (u32) SRC,                      \
1971                                         (atomic_t *)(unsigned long) (DST + insn->off)); \
1972                         else                                            \
1973                                 SRC = (u64) atomic64_fetch_##KOP(       \
1974                                         (u64) SRC,                      \
1975                                         (atomic64_t *)(unsigned long) (DST + insn->off)); \
1976                         break;
1977
1978         STX_ATOMIC_DW:
1979         STX_ATOMIC_W:
1980                 switch (IMM) {
1981                 ATOMIC_ALU_OP(BPF_ADD, add)
1982                 ATOMIC_ALU_OP(BPF_AND, and)
1983                 ATOMIC_ALU_OP(BPF_OR, or)
1984                 ATOMIC_ALU_OP(BPF_XOR, xor)
1985 #undef ATOMIC_ALU_OP
1986
1987                 case BPF_XCHG:
1988                         if (BPF_SIZE(insn->code) == BPF_W)
1989                                 SRC = (u32) atomic_xchg(
1990                                         (atomic_t *)(unsigned long) (DST + insn->off),
1991                                         (u32) SRC);
1992                         else
1993                                 SRC = (u64) atomic64_xchg(
1994                                         (atomic64_t *)(unsigned long) (DST + insn->off),
1995                                         (u64) SRC);
1996                         break;
1997                 case BPF_CMPXCHG:
1998                         if (BPF_SIZE(insn->code) == BPF_W)
1999                                 BPF_R0 = (u32) atomic_cmpxchg(
2000                                         (atomic_t *)(unsigned long) (DST + insn->off),
2001                                         (u32) BPF_R0, (u32) SRC);
2002                         else
2003                                 BPF_R0 = (u64) atomic64_cmpxchg(
2004                                         (atomic64_t *)(unsigned long) (DST + insn->off),
2005                                         (u64) BPF_R0, (u64) SRC);
2006                         break;
2007
2008                 default:
2009                         goto default_label;
2010                 }
2011                 CONT;
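	/* Illustration: these atomic cases correspond to source-level
	 * atomics in BPF program C. Roughly, Clang lowers the GCC-style
	 * builtin below to an STX | ATOMIC insn whose IMM encodes
	 * BPF_ADD | BPF_FETCH when the result is used (a sketch;
	 * "counter" is a hypothetical variable):
	 */
#if 0
	__u64 old = __sync_fetch_and_add(&counter, 1);
#endif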
2012
2013         default_label:
2014                 /* If we ever reach this, we have a bug somewhere. Die hard here
2015                  * instead of just returning 0; we could be somewhere in a subprog,
2016                  * so execution could otherwise continue, which we do /not/ want.
2017                  *
2018                  * Note that the verifier whitelists all opcodes in bpf_opcode_in_insntable().
2019                  */
2020                 pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
2021                         insn->code, insn->imm);
2022                 BUG_ON(1);
2023                 return 0;
2024 }
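/* Illustration: ___bpf_prog_run above uses the GNU C "labels as values"
 * extension for threaded dispatch. Each opcode byte indexes a table of
 * label addresses, and every handler jumps straight to the next
 * handler, avoiding a central switch. A stand-alone miniature of the
 * same technique (GCC/Clang only; not part of this file):
 */
#if 0
static int mini_run(const unsigned char *op)
{
	static const void * const dispatch[256] = {
		[0 ... 255] = &&op_halt,
		[1] = &&op_inc,
		[2] = &&op_dec,
	};
	int acc = 0;

#define NEXT goto *dispatch[*op++]
	NEXT;
op_inc:
	acc++;
	NEXT;
op_dec:
	acc--;
	NEXT;
op_halt:
	return acc;
#undef NEXT
}
#endif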
2025
2026 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2027 #define DEFINE_BPF_PROG_RUN(stack_size) \
2028 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2029 { \
2030         u64 stack[stack_size / sizeof(u64)]; \
2031         u64 regs[MAX_BPF_EXT_REG]; \
2032 \
2033         FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2034         ARG1 = (u64) (unsigned long) ctx; \
2035         return ___bpf_prog_run(regs, insn); \
2036 }
2037
2038 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
2039 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
2040 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
2041                                       const struct bpf_insn *insn) \
2042 { \
2043         u64 stack[stack_size / sizeof(u64)]; \
2044         u64 regs[MAX_BPF_EXT_REG]; \
2045 \
2046         FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2047         BPF_R1 = r1; \
2048         BPF_R2 = r2; \
2049         BPF_R3 = r3; \
2050         BPF_R4 = r4; \
2051         BPF_R5 = r5; \
2052         return ___bpf_prog_run(regs, insn); \
2053 }
2054
2055 #define EVAL1(FN, X) FN(X)
2056 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2057 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2058 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2059 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2060 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2061
2062 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2063 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2064 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2065
2066 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
2067 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
2068 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
2069
2070 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2071
2072 static unsigned int (*interpreters[])(const void *ctx,
2073                                       const struct bpf_insn *insn) = {
2074 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2075 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2076 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2077 };
2078 #undef PROG_NAME_LIST
2079 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2080 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
2081                                   const struct bpf_insn *insn) = {
2082 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2083 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2084 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2085 };
2086 #undef PROG_NAME_LIST
2087
2088 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
2089 {
2090         stack_depth = max_t(u32, stack_depth, 1);
2091         insn->off = (s16) insn->imm;
2092         insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
2093                 __bpf_call_base_args;
2094         insn->code = BPF_JMP | BPF_CALL_ARGS;
2095 }
2096
2097 #else
2098 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2099                                          const struct bpf_insn *insn)
2100 {
2101         /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
2102          * is not working properly, so warn about it!
2103          */
2104         WARN_ON_ONCE(1);
2105         return 0;
2106 }
2107 #endif
2108
2109 bool bpf_prog_map_compatible(struct bpf_map *map,
2110                              const struct bpf_prog *fp)
2111 {
2112         bool ret;
2113
2114         if (fp->kprobe_override)
2115                 return false;
2116
2117         spin_lock(&map->owner.lock);
2118         if (!map->owner.type) {
2119                 /* There's no owner yet where we could check for
2120                  * compatibility.
2121                  */
2122                 map->owner.type  = fp->type;
2123                 map->owner.jited = fp->jited;
2124                 map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
2125                 ret = true;
2126         } else {
2127                 ret = map->owner.type  == fp->type &&
2128                       map->owner.jited == fp->jited &&
2129                       map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
2130         }
2131         spin_unlock(&map->owner.lock);
2132
2133         return ret;
2134 }
2135
2136 static int bpf_check_tail_call(const struct bpf_prog *fp)
2137 {
2138         struct bpf_prog_aux *aux = fp->aux;
2139         int i, ret = 0;
2140
2141         mutex_lock(&aux->used_maps_mutex);
2142         for (i = 0; i < aux->used_map_cnt; i++) {
2143                 struct bpf_map *map = aux->used_maps[i];
2144
2145                 if (!map_type_contains_progs(map))
2146                         continue;
2147
2148                 if (!bpf_prog_map_compatible(map, fp)) {
2149                         ret = -EINVAL;
2150                         goto out;
2151                 }
2152         }
2153
2154 out:
2155         mutex_unlock(&aux->used_maps_mutex);
2156         return ret;
2157 }
2158
2159 static void bpf_prog_select_func(struct bpf_prog *fp)
2160 {
2161 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2162         u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
2163
2164         fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
2165 #else
2166         fp->bpf_func = __bpf_prog_ret0_warn;
2167 #endif
2168 }
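/* Worked example of the index math above: with fp->aux->stack_depth
 * == 84, round_up(84, 32) == 96 and 96 / 32 - 1 == 2, so fp->bpf_func
 * points at __bpf_prog_run96, whose 96-byte on-stack scratch area
 * covers the program's declared stack depth.
 */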
2169
2170 /**
2171  *      bpf_prog_select_runtime - select exec runtime for BPF program
2172  *      @fp: bpf_prog populated with BPF program
2173  *      @err: pointer to error variable
2174  *
2175  * Try to JIT the eBPF program; if JIT is not available, use the interpreter.
2176  * The BPF program will be executed via bpf_prog_run() function.
2177  *
2178  * Return: the &fp argument along with &err set to 0 for success or
2179  * a negative errno code on failure
2180  */
2181 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2182 {
2183         /* In case of BPF-to-BPF calls, the verifier did all the prep
2184          * work with regard to JITing, etc.
2185          */
2186         bool jit_needed = false;
2187
2188         if (fp->bpf_func)
2189                 goto finalize;
2190
2191         if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2192             bpf_prog_has_kfunc_call(fp))
2193                 jit_needed = true;
2194
2195         bpf_prog_select_func(fp);
2196
2197         /* eBPF JITs can rewrite the program in case constant
2198          * blinding is active. However, in case of error during
2199          * blinding, bpf_int_jit_compile() must always return a
2200          * valid program, which in this case is simply not
2201          * JITed and falls back to the interpreter.
2202          */
2203         if (!bpf_prog_is_dev_bound(fp->aux)) {
2204                 *err = bpf_prog_alloc_jited_linfo(fp);
2205                 if (*err)
2206                         return fp;
2207
2208                 fp = bpf_int_jit_compile(fp);
2209                 bpf_prog_jit_attempt_done(fp);
2210                 if (!fp->jited && jit_needed) {
2211                         *err = -ENOTSUPP;
2212                         return fp;
2213                 }
2214         } else {
2215                 *err = bpf_prog_offload_compile(fp);
2216                 if (*err)
2217                         return fp;
2218         }
2219
2220 finalize:
2221         bpf_prog_lock_ro(fp);
2222
2223         /* The tail call compatibility check can only be done at
2224          * this late stage, as we need to determine whether we deal
2225          * with JITed or non-JITed program concatenations, and not
2226          * all eBPF JITs might immediately support all features.
2227          */
2228         *err = bpf_check_tail_call(fp);
2229
2230         return fp;
2231 }
2232 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
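/* Illustration: the typical caller pattern, sketched from the program
 * load path (the error label is hypothetical). On error, fp is still
 * returned valid so the caller can release it:
 */
#if 0
prog = bpf_prog_select_runtime(prog, &err);
if (err < 0)
	goto free_prog;
#endif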
2233
2234 static unsigned int __bpf_prog_ret1(const void *ctx,
2235                                     const struct bpf_insn *insn)
2236 {
2237         return 1;
2238 }
2239
2240 static struct bpf_prog_dummy {
2241         struct bpf_prog prog;
2242 } dummy_bpf_prog = {
2243         .prog = {
2244                 .bpf_func = __bpf_prog_ret1,
2245         },
2246 };
2247
2248 struct bpf_empty_prog_array bpf_empty_prog_array = {
2249         .null_prog = NULL,
2250 };
2251 EXPORT_SYMBOL(bpf_empty_prog_array);
2252
2253 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2254 {
2255         if (prog_cnt)
2256                 return kzalloc(sizeof(struct bpf_prog_array) +
2257                                sizeof(struct bpf_prog_array_item) *
2258                                (prog_cnt + 1),
2259                                flags);
2260
2261         return &bpf_empty_prog_array.hdr;
2262 }
2263
2264 void bpf_prog_array_free(struct bpf_prog_array *progs)
2265 {
2266         if (!progs || progs == &bpf_empty_prog_array.hdr)
2267                 return;
2268         kfree_rcu(progs, rcu);
2269 }
2270
2271 int bpf_prog_array_length(struct bpf_prog_array *array)
2272 {
2273         struct bpf_prog_array_item *item;
2274         u32 cnt = 0;
2275
2276         for (item = array->items; item->prog; item++)
2277                 if (item->prog != &dummy_bpf_prog.prog)
2278                         cnt++;
2279         return cnt;
2280 }
2281
2282 bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
2283 {
2284         struct bpf_prog_array_item *item;
2285
2286         for (item = array->items; item->prog; item++)
2287                 if (item->prog != &dummy_bpf_prog.prog)
2288                         return false;
2289         return true;
2290 }
2291
2292 static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
2293                                      u32 *prog_ids,
2294                                      u32 request_cnt)
2295 {
2296         struct bpf_prog_array_item *item;
2297         int i = 0;
2298
2299         for (item = array->items; item->prog; item++) {
2300                 if (item->prog == &dummy_bpf_prog.prog)
2301                         continue;
2302                 prog_ids[i] = item->prog->aux->id;
2303                 if (++i == request_cnt) {
2304                         item++;
2305                         break;
2306                 }
2307         }
2308
2309         return !!(item->prog);
2310 }
2311
2312 int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2313                                 __u32 __user *prog_ids, u32 cnt)
2314 {
2315         unsigned long err = 0;
2316         bool nospc;
2317         u32 *ids;
2318
2319         /* users of this function are doing:
2320          * cnt = bpf_prog_array_length();
2321          * if (cnt > 0)
2322          *     bpf_prog_array_copy_to_user(..., cnt);
2323          * so below kcalloc doesn't need extra cnt > 0 check.
2324          */
2325         ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
2326         if (!ids)
2327                 return -ENOMEM;
2328         nospc = bpf_prog_array_copy_core(array, ids, cnt);
2329         err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
2330         kfree(ids);
2331         if (err)
2332                 return -EFAULT;
2333         if (nospc)
2334                 return -ENOSPC;
2335         return 0;
2336 }
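/* Illustration: the caller pattern that the comment above relies on,
 * sketched (variable names hypothetical):
 */
#if 0
u32 cnt = bpf_prog_array_length(array);
if (cnt)
	err = bpf_prog_array_copy_to_user(array, uattr_prog_ids, cnt);
#endif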
2337
2338 void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2339                                 struct bpf_prog *old_prog)
2340 {
2341         struct bpf_prog_array_item *item;
2342
2343         for (item = array->items; item->prog; item++)
2344                 if (item->prog == old_prog) {
2345                         WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2346                         break;
2347                 }
2348 }
2349
2350 /**
2351  * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2352  *                                   index into the program array with
2353  *                                   a dummy no-op program.
2354  * @array: a bpf_prog_array
2355  * @index: the index of the program to replace
2356  *
2357  * Skips over dummy programs (they are not counted) when calculating
2358  * the position of the program to replace.
2359  *
2360  * Return:
2361  * * 0          - Success
2362  * * -EINVAL    - Invalid index value. Must be a non-negative integer.
2363  * * -ENOENT    - Index out of range
2364  */
2365 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2366 {
2367         return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2368 }
2369
2370 /**
2371  * bpf_prog_array_update_at() - Updates the program at the given index
2372  *                              into the program array.
2373  * @array: a bpf_prog_array
2374  * @index: the index of the program to update
2375  * @prog: the program to insert into the array
2376  *
2377  * Skips over dummy programs (they are not counted) when calculating
2378  * the position of the program to update.
2379  *
2380  * Return:
2381  * * 0          - Success
2382  * * -EINVAL    - Invalid index value. Must be a non-negative integer.
2383  * * -ENOENT    - Index out of range
2384  */
2385 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2386                              struct bpf_prog *prog)
2387 {
2388         struct bpf_prog_array_item *item;
2389
2390         if (unlikely(index < 0))
2391                 return -EINVAL;
2392
2393         for (item = array->items; item->prog; item++) {
2394                 if (item->prog == &dummy_bpf_prog.prog)
2395                         continue;
2396                 if (!index) {
2397                         WRITE_ONCE(item->prog, prog);
2398                         return 0;
2399                 }
2400                 index--;
2401         }
2402         return -ENOENT;
2403 }
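/* Worked example of the dummy-skipping index logic above: with
 * items = { A, dummy, B, NULL }, index 0 refers to A and index 1
 * refers to B; the dummy entry is passed over without consuming an
 * index, and index 2 returns -ENOENT.
 */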
2404
2405 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2406                         struct bpf_prog *exclude_prog,
2407                         struct bpf_prog *include_prog,
2408                         u64 bpf_cookie,
2409                         struct bpf_prog_array **new_array)
2410 {
2411         int new_prog_cnt, carry_prog_cnt = 0;
2412         struct bpf_prog_array_item *existing, *new;
2413         struct bpf_prog_array *array;
2414         bool found_exclude = false;
2415
2416         /* Figure out how many existing progs we need to carry over to
2417          * the new array.
2418          */
2419         if (old_array) {
2420                 existing = old_array->items;
2421                 for (; existing->prog; existing++) {
2422                         if (existing->prog == exclude_prog) {
2423                                 found_exclude = true;
2424                                 continue;
2425                         }
2426                         if (existing->prog != &dummy_bpf_prog.prog)
2427                                 carry_prog_cnt++;
2428                         if (existing->prog == include_prog)
2429                                 return -EEXIST;
2430                 }
2431         }
2432
2433         if (exclude_prog && !found_exclude)
2434                 return -ENOENT;
2435
2436         /* How many progs (not NULL) will be in the new array? */
2437         new_prog_cnt = carry_prog_cnt;
2438         if (include_prog)
2439                 new_prog_cnt += 1;
2440
2441         /* Do we have any prog (not NULL) in the new array? */
2442         if (!new_prog_cnt) {
2443                 *new_array = NULL;
2444                 return 0;
2445         }
2446
2447         /* +1 as the end of prog_array is marked with NULL */
2448         array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2449         if (!array)
2450                 return -ENOMEM;
2451         new = array->items;
2452
2453         /* Fill in the new prog array */
2454         if (carry_prog_cnt) {
2455                 existing = old_array->items;
2456                 for (; existing->prog; existing++) {
2457                         if (existing->prog == exclude_prog ||
2458                             existing->prog == &dummy_bpf_prog.prog)
2459                                 continue;
2460
2461                         new->prog = existing->prog;
2462                         new->bpf_cookie = existing->bpf_cookie;
2463                         new++;
2464                 }
2465         }
2466         if (include_prog) {
2467                 new->prog = include_prog;
2468                 new->bpf_cookie = bpf_cookie;
2469                 new++;
2470         }
2471         new->prog = NULL;
2472         *new_array = array;
2473         return 0;
2474 }
2475
2476 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2477                              u32 *prog_ids, u32 request_cnt,
2478                              u32 *prog_cnt)
2479 {
2480         u32 cnt = 0;
2481
2482         if (array)
2483                 cnt = bpf_prog_array_length(array);
2484
2485         *prog_cnt = cnt;
2486
2487         /* return early if user requested only program count or nothing to copy */
2488         if (!request_cnt || !cnt)
2489                 return 0;
2490
2491         /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2492         return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
2493                                                                      : 0;
2494 }
2495
2496 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2497                           struct bpf_map **used_maps, u32 len)
2498 {
2499         struct bpf_map *map;
2500         u32 i;
2501
2502         for (i = 0; i < len; i++) {
2503                 map = used_maps[i];
2504                 if (map->ops->map_poke_untrack)
2505                         map->ops->map_poke_untrack(map, aux);
2506                 bpf_map_put(map);
2507         }
2508 }
2509
2510 static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2511 {
2512         __bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
2513         kfree(aux->used_maps);
2514 }
2515
2516 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2517                           struct btf_mod_pair *used_btfs, u32 len)
2518 {
2519 #ifdef CONFIG_BPF_SYSCALL
2520         struct btf_mod_pair *btf_mod;
2521         u32 i;
2522
2523         for (i = 0; i < len; i++) {
2524                 btf_mod = &used_btfs[i];
2525                 if (btf_mod->module)
2526                         module_put(btf_mod->module);
2527                 btf_put(btf_mod->btf);
2528         }
2529 #endif
2530 }
2531
2532 static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2533 {
2534         __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt);
2535         kfree(aux->used_btfs);
2536 }
2537
2538 static void bpf_prog_free_deferred(struct work_struct *work)
2539 {
2540         struct bpf_prog_aux *aux;
2541         int i;
2542
2543         aux = container_of(work, struct bpf_prog_aux, work);
2544 #ifdef CONFIG_BPF_SYSCALL
2545         bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
2546 #endif
2547         bpf_free_used_maps(aux);
2548         bpf_free_used_btfs(aux);
2549         if (bpf_prog_is_dev_bound(aux))
2550                 bpf_prog_offload_destroy(aux->prog);
2551 #ifdef CONFIG_PERF_EVENTS
2552         if (aux->prog->has_callchain_buf)
2553                 put_callchain_buffers();
2554 #endif
2555         if (aux->dst_trampoline)
2556                 bpf_trampoline_put(aux->dst_trampoline);
2557         for (i = 0; i < aux->func_cnt; i++) {
2558                 /* We can just unlink the subprog poke descriptor table as
2559                  * it was originally linked to the main program and is also
2560                  * released along with it.
2561                  */
2562                 aux->func[i]->aux->poke_tab = NULL;
2563                 bpf_jit_free(aux->func[i]);
2564         }
2565         if (aux->func_cnt) {
2566                 kfree(aux->func);
2567                 bpf_prog_unlock_free(aux->prog);
2568         } else {
2569                 bpf_jit_free(aux->prog);
2570         }
2571 }
2572
2573 void bpf_prog_free(struct bpf_prog *fp)
2574 {
2575         struct bpf_prog_aux *aux = fp->aux;
2576
2577         if (aux->dst_prog)
2578                 bpf_prog_put(aux->dst_prog);
2579         INIT_WORK(&aux->work, bpf_prog_free_deferred);
2580         schedule_work(&aux->work);
2581 }
2582 EXPORT_SYMBOL_GPL(bpf_prog_free);
2583
2584 /* RNG for unprivileged user space, with state separate from prandom_u32(). */
2585 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
2586
2587 void bpf_user_rnd_init_once(void)
2588 {
2589         prandom_init_once(&bpf_user_rnd_state);
2590 }
2591
2592 BPF_CALL_0(bpf_user_rnd_u32)
2593 {
2594         /* Should someone ever have the rather unwise idea to use some
2595          * of the registers passed into this function, then note that
2596          * this function is called from native eBPF and classic-to-eBPF
2597          * transformations. Register assignments from both sides are
2598          * different; e.g., classic always sets fn(ctx, A, X) here.
2599          */
2600         struct rnd_state *state;
2601         u32 res;
2602
2603         state = &get_cpu_var(bpf_user_rnd_state);
2604         res = prandom_u32_state(state);
2605         put_cpu_var(bpf_user_rnd_state);
2606
2607         return res;
2608 }
2609
2610 BPF_CALL_0(bpf_get_raw_cpu_id)
2611 {
2612         return raw_smp_processor_id();
2613 }
2614
2615 /* Weak definitions of helper functions in case we don't have bpf syscall. */
2616 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
2617 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
2618 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2619 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2620 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2621 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
2622 const struct bpf_func_proto bpf_spin_lock_proto __weak;
2623 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
2624 const struct bpf_func_proto bpf_jiffies64_proto __weak;
2625
2626 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2627 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
2628 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
2629 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
2630 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2631 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2632
2633 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2634 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2635 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2636 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
2637 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2638 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2639 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2640 const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2641 const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
2642
2643 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
2644 {
2645         return NULL;
2646 }
2647
2648 const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
2649 {
2650         return NULL;
2651 }
2652
2653 u64 __weak
2654 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2655                  void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2656 {
2657         return -ENOTSUPP;
2658 }
2659 EXPORT_SYMBOL_GPL(bpf_event_output);
2660
2661 /* Always built-in helper functions. */
2662 const struct bpf_func_proto bpf_tail_call_proto = {
2663         .func           = NULL,
2664         .gpl_only       = false,
2665         .ret_type       = RET_VOID,
2666         .arg1_type      = ARG_PTR_TO_CTX,
2667         .arg2_type      = ARG_CONST_MAP_PTR,
2668         .arg3_type      = ARG_ANYTHING,
2669 };
2670
2671 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
2672  * It is encouraged to implement bpf_int_jit_compile() instead, so that
2673  * eBPF and implicitly also cBPF can get JITed!
2674  */
2675 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
2676 {
2677         return prog;
2678 }
2679
2680 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
2681  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
2682  */
2683 void __weak bpf_jit_compile(struct bpf_prog *prog)
2684 {
2685 }
2686
2687 bool __weak bpf_helper_changes_pkt_data(void *func)
2688 {
2689         return false;
2690 }
2691
2692 /* Return TRUE if the JIT backend wants the verifier to enable sub-register
2693  * usage analysis and to insert explicit zero extensions.
2694  * Otherwise, return FALSE.
2695  *
2696  * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
2697  * you don't override this. JITs that don't want these extra insns can detect
2698  * them using insn_is_zext.
2699  */
2700 bool __weak bpf_jit_needs_zext(void)
2701 {
2702         return false;
2703 }
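/* Illustration: the explicit zero extension mentioned above is encoded
 * as a 32-bit reg-to-reg move onto itself with imm forced to 1, which
 * is what insn_is_zext() in <linux/filter.h> keys off. A paraphrased
 * sketch, not the exact header code:
 */
#if 0
static bool looks_like_verifier_zext(const struct bpf_insn *insn)
{
	return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
}
#endif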
2704
2705 bool __weak bpf_jit_supports_kfunc_call(void)
2706 {
2707         return false;
2708 }
2709
2710 /* To execute LD_ABS/LD_IND instructions, __bpf_prog_run() may call
2711  * skb_copy_bits(), so provide a weak definition of it for NET-less configs.
2712  */
2713 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
2714                          int len)
2715 {
2716         return -EFAULT;
2717 }
2718
2719 int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2720                               void *addr1, void *addr2)
2721 {
2722         return -ENOTSUPP;
2723 }
2724
2725 void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
2726 {
2727         return ERR_PTR(-ENOTSUPP);
2728 }
2729
2730 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
2731 EXPORT_SYMBOL(bpf_stats_enabled_key);
2732
2733 /* All definitions of tracepoints related to BPF. */
2734 #define CREATE_TRACE_POINTS
2735 #include <linux/bpf_trace.h>
2736
2737 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
2738 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);