// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2021 Facebook */
/* Copyright (c) 2021 Google */

#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>

#include "affinity.h"
#include "bpf_counter.h"
#include "cgroup.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"

#include "bpf_skel/bperf_cgroup.skel.h"

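/*
 * A software event that fires on every cgroup switch.  It is used only
 * as an attach point for the on_cgrp_switch BPF program; the samples
 * themselves are never consumed.
 */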
static struct perf_event_attr cgrp_switch_attr = {
        .type = PERF_TYPE_SOFTWARE,
        .config = PERF_COUNT_SW_CGROUP_SWITCHES,
        .size = sizeof(cgrp_switch_attr),
        .sample_period = 1,
        .disabled = 1,
};

static struct evsel *cgrp_switch;
static struct bperf_cgroup_bpf *skel;

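/* perf event FD of @evt on CPU index @cpu (the thread index is always 0) */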
#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))

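/*
 * Load and set up the bperf cgroup skeleton: size its maps from the
 * number of events, cgroups and CPUs, attach the cgroup-switch program
 * to a software event on each CPU, open a single copy of each event per
 * CPU (without a cgroup filter) and pass the FDs to the BPF side, then
 * fill the cgroup-id -> index map used by the BPF program.
 */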
static int bperf_load_program(struct evlist *evlist)
{
        struct bpf_link *link;
        struct evsel *evsel;
        struct cgroup *cgrp, *leader_cgrp;
        int i, j;
        struct perf_cpu cpu;
        int total_cpus = cpu__max_cpu().cpu;
        int map_size, map_fd;
        int prog_fd, err;

        skel = bperf_cgroup_bpf__open();
        if (!skel) {
                pr_err("Failed to open cgroup skeleton\n");
                return -1;
        }

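        /*
         * The evlist holds one copy of each event for every cgroup, so
         * the number of distinct events is nr_entries / nr_cgroups.
         */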
        skel->rodata->num_cpus = total_cpus;
        skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;

        BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);

        /* we need one copy of events per cpu for reading */
        map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
        bpf_map__set_max_entries(skel->maps.events, map_size);
        bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);
        /* previous result is saved in a per-cpu array */
        map_size = evlist->core.nr_entries / nr_cgroups;
        bpf_map__set_max_entries(skel->maps.prev_readings, map_size);
        /* cgroup result needs all events (per-cpu) */
        map_size = evlist->core.nr_entries;
        bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);

        set_max_rlimit();

        err = bperf_cgroup_bpf__load(skel);
        if (err) {
                pr_err("Failed to load cgroup skeleton\n");
                goto out;
        }

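        /*
         * Tell the BPF program which cgroup hierarchy to use when it
         * resolves the current task's cgroup on a switch.
         */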
        if (cgroup_is_v2("perf_event") > 0)
                skel->bss->use_cgroup_v2 = 1;

        err = -1;

        cgrp_switch = evsel__new(&cgrp_switch_attr);
        if (!cgrp_switch) {
                pr_err("Failed to create cgroup switches evsel\n");
                goto out;
        }

        if (evsel__open_per_cpu(cgrp_switch, evlist->core.all_cpus, -1) < 0) {
                pr_err("Failed to open cgroup switches event\n");
                goto out;
        }

        perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
                link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
                                                      FD(cgrp_switch, i));
                if (IS_ERR(link)) {
                        pr_err("Failed to attach cgroup program\n");
                        err = PTR_ERR(link);
                        goto out;
                }
        }

        /*
         * Update cgrp_idx map from cgroup-id to event index.
         */
        cgrp = NULL;
        i = 0;

        evlist__for_each_entry(evlist, evsel) {
                if (cgrp == NULL || evsel->cgrp == leader_cgrp) {
                        leader_cgrp = evsel->cgrp;
                        evsel->cgrp = NULL;

                        /* open single copy of the events w/o cgroup */
                        err = evsel__open_per_cpu(evsel, evsel->core.cpus, -1);
                        if (err == 0)
                                evsel->supported = true;

                        map_fd = bpf_map__fd(skel->maps.events);
                        perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) {
                                int fd = FD(evsel, j);
                                __u32 idx = evsel->core.idx * total_cpus + cpu.cpu;

                                bpf_map_update_elem(map_fd, &idx, &fd, BPF_ANY);
                        }

                        evsel->cgrp = leader_cgrp;
                }

                if (evsel->cgrp == cgrp)
                        continue;

                cgrp = evsel->cgrp;

                if (read_cgroup_id(cgrp) < 0) {
                        pr_err("Failed to get cgroup id\n");
                        err = -1;
                        goto out;
                }

                map_fd = bpf_map__fd(skel->maps.cgrp_idx);
                err = bpf_map_update_elem(map_fd, &cgrp->id, &i, BPF_ANY);
                if (err < 0) {
                        pr_err("Failed to update cgroup index map\n");
                        goto out;
                }

                i++;
        }

        /*
         * bperf uses BPF_PROG_TEST_RUN to get an accurate reading.  Check
         * whether the kernel supports it.
         */
        prog_fd = bpf_program__fd(skel->progs.trigger_read);
        err = bperf_trigger_reading(prog_fd, 0);
        if (err) {
                pr_warning("The kernel does not support test_run for raw_tp BPF programs.\n"
                           "Therefore, --for-each-cgroup might show inaccurate readings\n");
                err = 0;
        }

out:
        return err;
}

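/*
 * Called for each evsel, but the skeleton is shared: load it only once
 * and reuse it for the rest.
 */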
static int bperf_cgrp__load(struct evsel *evsel,
                            struct target *target __maybe_unused)
{
        static bool bperf_loaded = false;

        evsel->bperf_leader_prog_fd = -1;
        evsel->bperf_leader_link_fd = -1;

        if (!bperf_loaded && bperf_load_program(evsel->evlist))
                return -1;

        bperf_loaded = true;
        /* just to bypass bpf_counter_skip() */
        evsel->follower_skel = (struct bperf_follower_bpf *)skel;

        return 0;
}

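/*
 * The event FDs were already handed to the BPF program in
 * bperf_load_program(), so there is nothing to install here.
 */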
static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
                                  int cpu __maybe_unused, int fd __maybe_unused)
{
        /* nothing to do */
        return 0;
}

/*
 * Trigger the leader prog on each cpu, so the cgrp_readings map can get
 * the latest results.
 */
static int bperf_cgrp__sync_counters(struct evlist *evlist)
{
        struct perf_cpu cpu;
        int idx;
        int prog_fd = bpf_program__fd(skel->progs.trigger_read);

        perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
                bperf_trigger_reading(prog_fd, cpu.cpu);

        return 0;
}

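/*
 * Enable/disable are global: only the first evsel (idx 0) acts, and it
 * flips the flag that the BPF program consults on each cgroup switch.
 */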
static int bperf_cgrp__enable(struct evsel *evsel)
{
        if (evsel->core.idx)
                return 0;

        bperf_cgrp__sync_counters(evsel->evlist);

        skel->bss->enabled = 1;
        return 0;
}

static int bperf_cgrp__disable(struct evsel *evsel)
{
        if (evsel->core.idx)
                return 0;

        bperf_cgrp__sync_counters(evsel->evlist);

        skel->bss->enabled = 0;
        return 0;
}

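/*
 * Read all counters at once on behalf of the first evsel: sync the
 * per-cpu values, then copy each event's readings from the
 * cgrp_readings map into its perf_counts.
 */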
static int bperf_cgrp__read(struct evsel *evsel)
{
        struct evlist *evlist = evsel->evlist;
        int total_cpus = cpu__max_cpu().cpu;
        struct perf_counts_values *counts;
        struct bpf_perf_event_value *values;
        int reading_map_fd, err = 0;

        if (evsel->core.idx)
                return 0;

        bperf_cgrp__sync_counters(evsel->evlist);

        values = calloc(total_cpus, sizeof(*values));
        if (values == NULL)
                return -ENOMEM;

        reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings);

        evlist__for_each_entry(evlist, evsel) {
                __u32 idx = evsel->core.idx;
                int i;
                struct perf_cpu cpu;

                err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
                if (err) {
                        pr_err("bpf map lookup failed: idx=%u, event=%s, cgrp=%s\n",
                               idx, evsel__name(evsel), evsel->cgrp->name);
                        goto out;
                }

                perf_cpu_map__for_each_cpu(cpu, i, evsel->core.cpus) {
                        counts = perf_counts(evsel->counts, i, 0);
                        counts->val = values[cpu.cpu].counter;
                        counts->ena = values[cpu.cpu].enabled;
                        counts->run = values[cpu.cpu].running;
                }
        }

out:
        free(values);
        return err;
}

static int bperf_cgrp__destroy(struct evsel *evsel)
{
        if (evsel->core.idx)
                return 0;

        bperf_cgroup_bpf__destroy(skel);
        evsel__delete(cgrp_switch);  /* it'll detach the on_cgrp_switch progs too */

        return 0;
}

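/* counter ops used by "perf stat --bpf-counters --for-each-cgroup" */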
struct bpf_counter_ops bperf_cgrp_ops = {
        .load       = bperf_cgrp__load,
        .enable     = bperf_cgrp__enable,
        .disable    = bperf_cgrp__disable,
        .read       = bperf_cgrp__read,
        .install_pe = bperf_cgrp__install_pe,
        .destroy    = bperf_cgrp__destroy,
};