1 // SPDX-License-Identifier: GPL-2.0
2 #include <perf/evlist.h>
3 #include <perf/evsel.h>
4 #include <linux/bitops.h>
5 #include <linux/list.h>
6 #include <linux/hash.h>
8 #include <internal/evlist.h>
9 #include <internal/evsel.h>
10 #include <internal/xyarray.h>
11 #include <linux/zalloc.h>
18 #include <perf/cpumap.h>
19 #include <perf/threadmap.h>
20 #include <api/fd/array.h>
22 void perf_evlist__init(struct perf_evlist *evlist)
26 for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
27 INIT_HLIST_HEAD(&evlist->heads[i]);
28 INIT_LIST_HEAD(&evlist->entries);
29 evlist->nr_entries = 0;
32 static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
33 struct perf_evsel *evsel)
36 * We already have cpus for evsel (via PMU sysfs) so
37 * keep it, if there's no target cpu list defined.
39 if (!evsel->own_cpus || evlist->has_user_cpus) {
40 perf_cpu_map__put(evsel->cpus);
41 evsel->cpus = perf_cpu_map__get(evlist->cpus);
42 } else if (evsel->cpus != evsel->own_cpus) {
43 perf_cpu_map__put(evsel->cpus);
44 evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
47 perf_thread_map__put(evsel->threads);
48 evsel->threads = perf_thread_map__get(evlist->threads);
51 static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
53 struct perf_evsel *evsel;
55 perf_evlist__for_each_evsel(evlist, evsel)
56 __perf_evlist__propagate_maps(evlist, evsel);
59 void perf_evlist__add(struct perf_evlist *evlist,
60 struct perf_evsel *evsel)
62 list_add_tail(&evsel->node, &evlist->entries);
63 evlist->nr_entries += 1;
64 __perf_evlist__propagate_maps(evlist, evsel);
67 void perf_evlist__remove(struct perf_evlist *evlist,
68 struct perf_evsel *evsel)
70 list_del_init(&evsel->node);
71 evlist->nr_entries -= 1;
74 struct perf_evlist *perf_evlist__new(void)
76 struct perf_evlist *evlist = zalloc(sizeof(*evlist));
79 perf_evlist__init(evlist);
85 perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
87 struct perf_evsel *next;
90 next = list_first_entry(&evlist->entries,
94 next = list_next_entry(prev, node);
97 /* Empty list is noticed here so don't need checking on entry. */
98 if (&next->node == &evlist->entries)
/*
 * Free the evlist container itself.  NOTE(review): this does not put
 * the cpu/thread maps or free the evsels — callers are expected to
 * release those separately; confirm against the libperf API contract.
 * free(NULL) is a no-op, so a NULL evlist is safe.
 */
void perf_evlist__delete(struct perf_evlist *evlist)
{
	free(evlist);
}
109 void perf_evlist__set_maps(struct perf_evlist *evlist,
110 struct perf_cpu_map *cpus,
111 struct perf_thread_map *threads)
114 * Allow for the possibility that one or another of the maps isn't being
115 * changed i.e. don't put it. Note we are assuming the maps that are
116 * being applied are brand new and evlist is taking ownership of the
117 * original reference count of 1. If that is not the case it is up to
118 * the caller to increase the reference count.
120 if (cpus != evlist->cpus) {
121 perf_cpu_map__put(evlist->cpus);
122 evlist->cpus = perf_cpu_map__get(cpus);
125 if (threads != evlist->threads) {
126 perf_thread_map__put(evlist->threads);
127 evlist->threads = perf_thread_map__get(threads);
130 perf_evlist__propagate_maps(evlist);
133 int perf_evlist__open(struct perf_evlist *evlist)
135 struct perf_evsel *evsel;
138 perf_evlist__for_each_entry(evlist, evsel) {
139 err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
147 perf_evlist__close(evlist);
151 void perf_evlist__close(struct perf_evlist *evlist)
153 struct perf_evsel *evsel;
155 perf_evlist__for_each_entry_reverse(evlist, evsel)
156 perf_evsel__close(evsel);
159 void perf_evlist__enable(struct perf_evlist *evlist)
161 struct perf_evsel *evsel;
163 perf_evlist__for_each_entry(evlist, evsel)
164 perf_evsel__enable(evsel);
167 void perf_evlist__disable(struct perf_evlist *evlist)
169 struct perf_evsel *evsel;
171 perf_evlist__for_each_entry(evlist, evsel)
172 perf_evsel__disable(evsel);
175 u64 perf_evlist__read_format(struct perf_evlist *evlist)
177 struct perf_evsel *first = perf_evlist__first(evlist);
179 return first->attr.read_format;
182 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
184 static void perf_evlist__id_hash(struct perf_evlist *evlist,
185 struct perf_evsel *evsel,
186 int cpu, int thread, u64 id)
189 struct perf_sample_id *sid = SID(evsel, cpu, thread);
193 hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
194 hlist_add_head(&sid->node, &evlist->heads[hash]);
197 void perf_evlist__id_add(struct perf_evlist *evlist,
198 struct perf_evsel *evsel,
199 int cpu, int thread, u64 id)
201 perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
202 evsel->id[evsel->ids++] = id;
205 int perf_evlist__id_add_fd(struct perf_evlist *evlist,
206 struct perf_evsel *evsel,
207 int cpu, int thread, int fd)
209 u64 read_data[4] = { 0, };
210 int id_idx = 1; /* The first entry is the counter value */
214 ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
221 /* Legacy way to get event id.. All hail to old kernels! */
224 * This way does not work with group format read, so bail
227 if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
230 if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
231 read(fd, &read_data, sizeof(read_data)) == -1)
234 if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
236 if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
239 id = read_data[id_idx];
242 perf_evlist__id_add(evlist, evsel, cpu, thread, id);
246 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
248 int nr_cpus = perf_cpu_map__nr(evlist->cpus);
249 int nr_threads = perf_thread_map__nr(evlist->threads);
251 struct perf_evsel *evsel;
253 perf_evlist__for_each_entry(evlist, evsel) {
254 if (evsel->system_wide)
257 nfds += nr_cpus * nr_threads;
260 if (fdarray__available_entries(&evlist->pollfd) < nfds &&
261 fdarray__grow(&evlist->pollfd, nfds) < 0)
267 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
268 void *ptr, short revent)
270 int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
273 evlist->pollfd.priv[pos].ptr = ptr;
274 fcntl(fd, F_SETFL, O_NONBLOCK);
280 int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
282 return fdarray__poll(&evlist->pollfd, timeout);