kernel/bpf/stackmap.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include <linux/elf.h>
#include <linux/pagemap.h>
#include <linux/irq_work.h>
#include <linux/btf_ids.h>
#include "percpu_freelist.h"

#define STACK_CREATE_FLAG_MASK                                  \
        (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |        \
         BPF_F_STACK_BUILD_ID)

struct stack_map_bucket {
        struct pcpu_freelist_node fnode;
        u32 hash;
        u32 nr;
        u64 data[];
};

struct bpf_stack_map {
        struct bpf_map map;
        void *elems;
        struct pcpu_freelist freelist;
        u32 n_buckets;
        struct stack_map_bucket *buckets[];
};
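
/*
 * Exposition note (added here, not in the original source): the map is
 * addressed like a hash table with no chaining. A trace is hashed into
 * one of n_buckets slots; each bucket stores either an array of u64
 * instruction pointers or an array of struct bpf_stack_build_id,
 * depending on BPF_F_STACK_BUILD_ID. On a collision, bpf_get_stackid()
 * either reuses the slot (BPF_F_REUSE_STACKID) or returns -EEXIST.
 */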

/* irq_work to run up_read() for build_id lookup in nmi context */
struct stack_map_irq_work {
        struct irq_work irq_work;
        struct mm_struct *mm;
};

static void do_up_read(struct irq_work *entry)
{
        struct stack_map_irq_work *work;

        if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
                return;

        work = container_of(entry, struct stack_map_irq_work, irq_work);
        mmap_read_unlock_non_owner(work->mm);
}

static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work);

static inline bool stack_map_use_build_id(struct bpf_map *map)
{
        return (map->map_flags & BPF_F_STACK_BUILD_ID);
}

static inline int stack_map_data_size(struct bpf_map *map)
{
        return stack_map_use_build_id(map) ?
                sizeof(struct bpf_stack_build_id) : sizeof(u64);
}

static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
        u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
        int err;

        smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
                                         smap->map.numa_node);
        if (!smap->elems)
                return -ENOMEM;

        err = pcpu_freelist_init(&smap->freelist);
        if (err)
                goto free_elems;

        pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
                               smap->map.max_entries);
        return 0;

free_elems:
        bpf_map_area_free(smap->elems);
        return err;
}
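
/*
 * Exposition note (added here, not in the original source): each
 * preallocated element is a struct stack_map_bucket header followed by
 * value_size bytes of trace data, and all max_entries elements sit on a
 * per-cpu freelist. Updates never allocate at run time: bpf_get_stackid()
 * pops a free bucket and pushes any bucket it displaces back onto the
 * freelist.
 */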

/* Called from syscall */
static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
        u32 value_size = attr->value_size;
        struct bpf_stack_map *smap;
        struct bpf_map_memory mem;
        u64 cost, n_buckets;
        int err;

        if (!bpf_capable())
                return ERR_PTR(-EPERM);

        if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
                return ERR_PTR(-EINVAL);

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            value_size < 8 || value_size % 8)
                return ERR_PTR(-EINVAL);

        BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
        if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
                if (value_size % sizeof(struct bpf_stack_build_id) ||
                    value_size / sizeof(struct bpf_stack_build_id)
                    > sysctl_perf_event_max_stack)
                        return ERR_PTR(-EINVAL);
        } else if (value_size / 8 > sysctl_perf_event_max_stack)
                return ERR_PTR(-EINVAL);

        /* hash table size must be power of 2 */
        n_buckets = roundup_pow_of_two(attr->max_entries);

        cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
        cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
        err = bpf_map_charge_init(&mem, cost);
        if (err)
                return ERR_PTR(err);

        smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
        if (!smap) {
                bpf_map_charge_finish(&mem);
                return ERR_PTR(-ENOMEM);
        }

        bpf_map_init_from_attr(&smap->map, attr);
        smap->map.value_size = value_size;
        smap->n_buckets = n_buckets;

        err = get_callchain_buffers(sysctl_perf_event_max_stack);
        if (err)
                goto free_charge;

        err = prealloc_elems_and_freelist(smap);
        if (err)
                goto put_buffers;

        bpf_map_charge_move(&smap->map.memory, &mem);

        return &smap->map;

put_buffers:
        put_callchain_buffers();
free_charge:
        bpf_map_charge_finish(&mem);
        bpf_map_area_free(smap);
        return ERR_PTR(err);
}
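
/*
 * Example (sketch, not part of the original source): a user-space map
 * creation has to satisfy the checks above, i.e. key_size == 4 and
 * value_size a multiple of 8 (or of sizeof(struct bpf_stack_build_id)
 * when BPF_F_STACK_BUILD_ID is set). With the classic libbpf wrapper,
 * whose exact name and signature depend on the libbpf version:
 *
 *	int fd = bpf_create_map(BPF_MAP_TYPE_STACK_TRACE,
 *				sizeof(__u32),        // key: stack id
 *				127 * sizeof(__u64),  // value: array of ips
 *				10000,                // max_entries
 *				0);                   // map_flags
 */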

#define BPF_BUILD_ID 3
/*
 * Parse build id from the note segment. This logic can be shared between
 * 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
 * identical.
 */
static inline int stack_map_parse_build_id(void *page_addr,
                                           unsigned char *build_id,
                                           void *note_start,
                                           Elf32_Word note_size)
{
        Elf32_Word note_offs = 0, new_offs;

        /* check for overflow */
        if (note_start < page_addr || note_start + note_size < note_start)
                return -EINVAL;

        /* only supports note that fits in the first page */
        if (note_start + note_size > page_addr + PAGE_SIZE)
                return -EINVAL;

        while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
                Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);

                if (nhdr->n_type == BPF_BUILD_ID &&
                    nhdr->n_namesz == sizeof("GNU") &&
                    nhdr->n_descsz > 0 &&
                    nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
                        memcpy(build_id,
                               note_start + note_offs +
                               ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
                               nhdr->n_descsz);
                        memset(build_id + nhdr->n_descsz, 0,
                               BPF_BUILD_ID_SIZE - nhdr->n_descsz);
                        return 0;
                }
                new_offs = note_offs + sizeof(Elf32_Nhdr) +
                        ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
                if (new_offs <= note_offs)  /* overflow */
                        break;
                note_offs = new_offs;
        }
        return -EINVAL;
}
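
/*
 * Exposition note (added here, not in the original source): the note
 * entry matched above has the standard ELF layout
 *
 *	Elf32_Nhdr  { n_namesz = 4, n_descsz <= 20, n_type = 3 }
 *	"GNU\0"     name, padded to a 4-byte boundary
 *	<build id>  descriptor, n_descsz bytes
 *
 * which is why the descriptor is copied from
 * note_start + note_offs + sizeof(Elf32_Nhdr) + ALIGN(sizeof("GNU"), 4).
 */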

/* Parse build ID from 32-bit ELF */
static int stack_map_get_build_id_32(void *page_addr,
                                     unsigned char *build_id)
{
        Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
        Elf32_Phdr *phdr;
        int i;

        /* only supports phdr that fits in one page */
        if (ehdr->e_phnum >
            (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
                return -EINVAL;

        phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));

        for (i = 0; i < ehdr->e_phnum; ++i)
                if (phdr[i].p_type == PT_NOTE)
                        return stack_map_parse_build_id(page_addr, build_id,
                                        page_addr + phdr[i].p_offset,
                                        phdr[i].p_filesz);
        return -EINVAL;
}

/* Parse build ID from 64-bit ELF */
static int stack_map_get_build_id_64(void *page_addr,
                                     unsigned char *build_id)
{
        Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
        Elf64_Phdr *phdr;
        int i;

        /* only supports phdr that fits in one page */
        if (ehdr->e_phnum >
            (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
                return -EINVAL;

        phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));

        for (i = 0; i < ehdr->e_phnum; ++i)
                if (phdr[i].p_type == PT_NOTE)
                        return stack_map_parse_build_id(page_addr, build_id,
                                        page_addr + phdr[i].p_offset,
                                        phdr[i].p_filesz);
        return -EINVAL;
}

/* Parse build ID of ELF file mapped to vma */
static int stack_map_get_build_id(struct vm_area_struct *vma,
                                  unsigned char *build_id)
{
        Elf32_Ehdr *ehdr;
        struct page *page;
        void *page_addr;
        int ret;

        /* only works for page backed storage */
        if (!vma->vm_file)
                return -EINVAL;

        page = find_get_page(vma->vm_file->f_mapping, 0);
        if (!page)
                return -EFAULT; /* page not mapped */

        ret = -EINVAL;
        page_addr = kmap_atomic(page);
        ehdr = (Elf32_Ehdr *)page_addr;

        /* compare magic x7f "ELF" */
        if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
                goto out;

        /* only support executable file and shared object file */
        if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
                goto out;

        if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
                ret = stack_map_get_build_id_32(page_addr, build_id);
        else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
                ret = stack_map_get_build_id_64(page_addr, build_id);
out:
        kunmap_atomic(page_addr);
        put_page(page);
        return ret;
}

static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
                                          u64 *ips, u32 trace_nr, bool user)
{
        int i;
        struct vm_area_struct *vma;
        bool irq_work_busy = false;
        struct stack_map_irq_work *work = NULL;

        if (irqs_disabled()) {
                if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
                        work = this_cpu_ptr(&up_read_work);
                        if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY) {
                                /* cannot queue more up_read, fallback */
                                irq_work_busy = true;
                        }
                } else {
                        /*
                         * PREEMPT_RT does not allow to trylock mmap sem in
                         * interrupt disabled context. Force the fallback code.
                         */
                        irq_work_busy = true;
                }
        }

        /*
         * We cannot do up_read() when the irq is disabled, because of
         * risk to deadlock with rq_lock. To do build_id lookup when the
         * irqs are disabled, we need to run up_read() in irq_work. We use
         * a percpu variable to do the irq_work. If the irq_work is
         * already used by another lookup, we fall back to report ips.
         *
         * Same fallback is used for kernel stack (!user) on a stackmap
         * with build_id.
         */
        if (!user || !current || !current->mm || irq_work_busy ||
            !mmap_read_trylock_non_owner(current->mm)) {
                /* cannot access current->mm, fall back to ips */
                for (i = 0; i < trace_nr; i++) {
                        id_offs[i].status = BPF_STACK_BUILD_ID_IP;
                        id_offs[i].ip = ips[i];
                        memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
                }
                return;
        }

        for (i = 0; i < trace_nr; i++) {
                vma = find_vma(current->mm, ips[i]);
                if (!vma || stack_map_get_build_id(vma, id_offs[i].build_id)) {
                        /* per entry fall back to ips */
                        id_offs[i].status = BPF_STACK_BUILD_ID_IP;
                        id_offs[i].ip = ips[i];
                        memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
                        continue;
                }
                id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
                        - vma->vm_start;
                id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
        }

        if (!work) {
                mmap_read_unlock_non_owner(current->mm);
        } else {
                work->mm = current->mm;
                irq_work_queue(&work->irq_work);
        }
}

static struct perf_callchain_entry *
get_callchain_entry_for_task(struct task_struct *task, u32 init_nr)
{
#ifdef CONFIG_STACKTRACE
        struct perf_callchain_entry *entry;
        int rctx;

        entry = get_callchain_entry(&rctx);

        if (!entry)
                return NULL;

        entry->nr = init_nr +
                stack_trace_save_tsk(task, (unsigned long *)(entry->ip + init_nr),
                                     sysctl_perf_event_max_stack - init_nr, 0);

        /* stack_trace_save_tsk() works on unsigned long array, while
         * perf_callchain_entry uses u64 array. For 32-bit systems, it is
         * necessary to fix this mismatch.
         */
        if (__BITS_PER_LONG != 64) {
                unsigned long *from = (unsigned long *) entry->ip;
                u64 *to = entry->ip;
                int i;

                /* copy data from the end to avoid using extra buffer */
                for (i = entry->nr - 1; i >= (int)init_nr; i--)
                        to[i] = (u64)(from[i]);
        }

        put_callchain_entry(rctx);

        return entry;
#else /* CONFIG_STACKTRACE */
        return NULL;
#endif
}

BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
           u64, flags)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
        struct perf_callchain_entry *trace;
        struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
        u32 max_depth = map->value_size / stack_map_data_size(map);
        /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
        u32 init_nr = sysctl_perf_event_max_stack - max_depth;
        u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
        u32 hash, id, trace_nr, trace_len;
        bool user = flags & BPF_F_USER_STACK;
        bool kernel = !user;
        u64 *ips;
        bool hash_matches;

        if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                               BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
                return -EINVAL;

        trace = get_perf_callchain(regs, init_nr, kernel, user,
                                   sysctl_perf_event_max_stack, false, false);

        if (unlikely(!trace))
                /* couldn't fetch the stack trace */
                return -EFAULT;
        /* get_perf_callchain() guarantees that trace->nr >= init_nr
         * and trace->nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
         */
        trace_nr = trace->nr - init_nr;

        if (trace_nr <= skip)
                /* skipping more than usable stack trace */
                return -EFAULT;

        trace_nr -= skip;
        trace_len = trace_nr * sizeof(u64);
        ips = trace->ip + skip + init_nr;
        hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
        id = hash & (smap->n_buckets - 1);
        bucket = READ_ONCE(smap->buckets[id]);

        hash_matches = bucket && bucket->hash == hash;
        /* fast cmp */
        if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
                return id;

        if (stack_map_use_build_id(map)) {
                /* for build_id+offset, pop a bucket before slow cmp */
                new_bucket = (struct stack_map_bucket *)
                        pcpu_freelist_pop(&smap->freelist);
                if (unlikely(!new_bucket))
                        return -ENOMEM;
                new_bucket->nr = trace_nr;
                stack_map_get_build_id_offset(
                        (struct bpf_stack_build_id *)new_bucket->data,
                        ips, trace_nr, user);
                trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
                if (hash_matches && bucket->nr == trace_nr &&
                    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
                        pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
                        return id;
                }
                if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
                        pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
                        return -EEXIST;
                }
        } else {
                if (hash_matches && bucket->nr == trace_nr &&
                    memcmp(bucket->data, ips, trace_len) == 0)
                        return id;
                if (bucket && !(flags & BPF_F_REUSE_STACKID))
                        return -EEXIST;

                new_bucket = (struct stack_map_bucket *)
                        pcpu_freelist_pop(&smap->freelist);
                if (unlikely(!new_bucket))
                        return -ENOMEM;
                memcpy(new_bucket->data, ips, trace_len);
        }

        new_bucket->hash = hash;
        new_bucket->nr = trace_nr;

        old_bucket = xchg(&smap->buckets[id], new_bucket);
        if (old_bucket)
                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
        return id;
}

const struct bpf_func_proto bpf_get_stackid_proto = {
        .func           = bpf_get_stackid,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
};
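
/*
 * Example (sketch, not part of the original source): typical use of
 * bpf_get_stackid() from a BPF program, assuming a libbpf-style map
 * definition named "stackmap":
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_STACK_TRACE);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, 127 * sizeof(u64));
 *		__uint(max_entries, 10000);
 *	} stackmap SEC(".maps");
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		long id = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK);
 *
 *		if (id >= 0)
 *			bpf_printk("user stack id %ld", id);
 *		return 0;
 *	}
 */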

static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
                            void *buf, u32 size, u64 flags)
{
        u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
        bool user_build_id = flags & BPF_F_USER_BUILD_ID;
        u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
        bool user = flags & BPF_F_USER_STACK;
        struct perf_callchain_entry *trace;
        bool kernel = !user;
        int err = -EINVAL;
        u64 *ips;

        if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
                               BPF_F_USER_BUILD_ID)))
                goto clear;
        if (kernel && user_build_id)
                goto clear;

        elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
                                            : sizeof(u64);
        if (unlikely(size % elem_size))
                goto clear;

        /* cannot get valid user stack for task without user_mode regs */
        if (task && user && !user_mode(regs))
                goto err_fault;

        num_elem = size / elem_size;
        if (sysctl_perf_event_max_stack < num_elem)
                init_nr = 0;
        else
                init_nr = sysctl_perf_event_max_stack - num_elem;

        if (kernel && task)
                trace = get_callchain_entry_for_task(task, init_nr);
        else
                trace = get_perf_callchain(regs, init_nr, kernel, user,
                                           sysctl_perf_event_max_stack,
                                           false, false);
        if (unlikely(!trace))
                goto err_fault;

        trace_nr = trace->nr - init_nr;
        if (trace_nr < skip)
                goto err_fault;

        trace_nr -= skip;
        trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
        copy_len = trace_nr * elem_size;
        ips = trace->ip + skip + init_nr;
        if (user && user_build_id)
                stack_map_get_build_id_offset(buf, ips, trace_nr, user);
        else
                memcpy(buf, ips, copy_len);

        if (size > copy_len)
                memset(buf + copy_len, 0, size - copy_len);
        return copy_len;

err_fault:
        err = -EFAULT;
clear:
        memset(buf, 0, size);
        return err;
}

BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
           u64, flags)
{
        return __bpf_get_stack(regs, NULL, buf, size, flags);
}

const struct bpf_func_proto bpf_get_stack_proto = {
        .func           = bpf_get_stack,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg4_type      = ARG_ANYTHING,
};
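
/*
 * Example (sketch, not part of the original source): unlike
 * bpf_get_stackid(), bpf_get_stack() copies the trace into a
 * caller-supplied buffer:
 *
 *	u64 ips[32];
 *	long len;
 *
 *	len = bpf_get_stack(ctx, ips, sizeof(ips), BPF_F_USER_STACK);
 *	if (len > 0)
 *		bpf_printk("copied %ld bytes of user stack", len);
 *
 * On success the return value is the number of bytes copied, and the
 * unused tail of the buffer is zeroed by __bpf_get_stack() above.
 */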

BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
           u32, size, u64, flags)
{
        struct pt_regs *regs = task_pt_regs(task);

        return __bpf_get_stack(regs, task, buf, size, flags);
}

BTF_ID_LIST(bpf_get_task_stack_btf_ids)
BTF_ID(struct, task_struct)

const struct bpf_func_proto bpf_get_task_stack_proto = {
        .func           = bpf_get_task_stack,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_BTF_ID,
        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
        .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg4_type      = ARG_ANYTHING,
        .btf_id         = bpf_get_task_stack_btf_ids,
};
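
/*
 * Example (sketch, not part of the original source): bpf_get_task_stack()
 * takes a BTF-typed task_struct pointer, so it can be called on a task
 * other than current, e.g. from a task iterator program:
 *
 *	SEC("iter/task")
 *	int dump_task_stack(struct bpf_iter__task *ctx)
 *	{
 *		struct task_struct *task = ctx->task;
 *		u64 ips[32];
 *
 *		if (!task)
 *			return 0;
 *		bpf_get_task_stack(task, ips, sizeof(ips), 0);
 *		return 0;
 *	}
 */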

/* Called from eBPF program */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
        return ERR_PTR(-EOPNOTSUPP);
}

/* Called from syscall */
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
        struct stack_map_bucket *bucket, *old_bucket;
        u32 id = *(u32 *)key, trace_len;

        if (unlikely(id >= smap->n_buckets))
                return -ENOENT;

        bucket = xchg(&smap->buckets[id], NULL);
        if (!bucket)
                return -ENOENT;

        trace_len = bucket->nr * stack_map_data_size(map);
        memcpy(value, bucket->data, trace_len);
        memset(value + trace_len, 0, map->value_size - trace_len);

        old_bucket = xchg(&smap->buckets[id], bucket);
        if (old_bucket)
                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
        return 0;
}
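
/*
 * Example (sketch, not part of the original source): from user space, the
 * id returned by bpf_get_stackid() is resolved through the regular lookup
 * path, which lands in bpf_stackmap_copy():
 *
 *	__u64 ips[127] = {};
 *
 *	if (bpf_map_lookup_elem(map_fd, &stack_id, ips) == 0) {
 *		// walk ips[] until the first zero entry
 *	}
 */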

static int stack_map_get_next_key(struct bpf_map *map, void *key,
                                  void *next_key)
{
        struct bpf_stack_map *smap = container_of(map,
                                                  struct bpf_stack_map, map);
        u32 id;

        WARN_ON_ONCE(!rcu_read_lock_held());

        if (!key) {
                id = 0;
        } else {
                id = *(u32 *)key;
                if (id >= smap->n_buckets || !smap->buckets[id])
                        id = 0;
                else
                        id++;
        }

        while (id < smap->n_buckets && !smap->buckets[id])
                id++;

        if (id >= smap->n_buckets)
                return -ENOENT;

        *(u32 *)next_key = id;
        return 0;
}

static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
                                 u64 map_flags)
{
        return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int stack_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
        struct stack_map_bucket *old_bucket;
        u32 id = *(u32 *)key;

        if (unlikely(id >= smap->n_buckets))
                return -E2BIG;

        old_bucket = xchg(&smap->buckets[id], NULL);
        if (old_bucket) {
                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
                return 0;
        } else {
                return -ENOENT;
        }
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void stack_map_free(struct bpf_map *map)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);

        bpf_map_area_free(smap->elems);
        pcpu_freelist_destroy(&smap->freelist);
        bpf_map_area_free(smap);
        put_callchain_buffers();
}

static int stack_trace_map_btf_id;
const struct bpf_map_ops stack_trace_map_ops = {
        .map_alloc = stack_map_alloc,
        .map_free = stack_map_free,
        .map_get_next_key = stack_map_get_next_key,
        .map_lookup_elem = stack_map_lookup_elem,
        .map_update_elem = stack_map_update_elem,
        .map_delete_elem = stack_map_delete_elem,
        .map_check_btf = map_check_no_btf,
        .map_btf_name = "bpf_stack_map",
        .map_btf_id = &stack_trace_map_btf_id,
};

static int __init stack_map_init(void)
{
        int cpu;
        struct stack_map_irq_work *work;

        for_each_possible_cpu(cpu) {
                work = per_cpu_ptr(&up_read_work, cpu);
                init_irq_work(&work->irq_work, do_up_read);
        }
        return 0;
}
subsys_initcall(stack_map_init);