// SPDX-License-Identifier: GPL-2.0-only
#include <perf/cpumap.h>
#include <stdlib.h>
#include <linux/refcount.h>
#include <linux/kernel.h> /* for max() */
#include <internal/cpumap.h>
#include <asm/bug.h>
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <ctype.h>
#include <limits.h>
#include "internal.h"

void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
{
	RC_CHK_ACCESS(map)->nr = nr_cpus;
}

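/*
 * Editorial note: the RC_STRUCT()/ADD_RC_CHK()/RC_CHK_*() macros used
 * throughout this file come from libperf's reference count checking
 * infrastructure (internal rc_check.h). With checking disabled they
 * compile down to plain pointer accesses; with it enabled they add an
 * indirection that can catch unbalanced get/put pairs.
 */
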
struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
{
	RC_STRUCT(perf_cpu_map) *cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus);
	struct perf_cpu_map *result;

	if (ADD_RC_CHK(result, cpus)) {
		cpus->nr = nr_cpus;
		refcount_set(&cpus->refcnt, 1);
	}

	return result;
}

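/*
 * A cpu value of -1 is the "any CPU" sentinel: it corresponds to the
 * cpu == -1 argument that perf_event_open() accepts to mean "measure on
 * any CPU" for a given thread.
 */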
struct perf_cpu_map *perf_cpu_map__new_any_cpu(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);

	if (cpus)
		RC_CHK_ACCESS(cpus)->map[0].cpu = -1;

	return cpus;
}

static void cpu_map__delete(struct perf_cpu_map *map)
{
	if (map) {
		WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0,
			  "cpu_map refcnt unbalanced\n");
		RC_CHK_FREE(map);
	}
}

struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
	struct perf_cpu_map *result;

	if (RC_CHK_GET(result, map))
		refcount_inc(perf_cpu_map__refcnt(map));

	return result;
}

void perf_cpu_map__put(struct perf_cpu_map *map)
{
	if (map) {
		if (refcount_dec_and_test(perf_cpu_map__refcnt(map)))
			cpu_map__delete(map);
		else
			RC_CHK_PUT(map);
	}
}

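/*
 * Illustrative get/put pairing (an editorial sketch, not part of the
 * original file):
 *
 *	struct perf_cpu_map *map = perf_cpu_map__new_online_cpus(); // refcnt 1
 *	struct perf_cpu_map *ref = perf_cpu_map__get(map);          // refcnt 2
 *
 *	perf_cpu_map__put(ref);	// refcnt 1
 *	perf_cpu_map__put(map);	// refcnt 0 -> cpu_map__delete()
 */
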
static struct perf_cpu_map *cpu_map__new_sysconf(void)
{
	struct perf_cpu_map *cpus;
	int nr_cpus, nr_cpus_conf;

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr_cpus < 0)
		return NULL;

	nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF);
	if (nr_cpus != nr_cpus_conf) {
		pr_warning("Number of online CPUs (%d) differs from the number configured (%d); the CPU map will only cover the first %d CPUs.",
			   nr_cpus, nr_cpus_conf, nr_cpus);
	}

	cpus = perf_cpu_map__alloc(nr_cpus);
	if (cpus != NULL) {
		int i;

		for (i = 0; i < nr_cpus; ++i)
			RC_CHK_ACCESS(cpus)->map[i].cpu = i;
	}

	return cpus;
}

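/*
 * /sys/devices/system/cpu/online holds a cpulist string such as "0-7" or
 * "0-3,5-7"; perf_cpu_map__read() below parses that format.
 */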
static struct perf_cpu_map *cpu_map__new_sysfs_online(void)
{
	struct perf_cpu_map *cpus = NULL;
	FILE *onlnf;

	onlnf = fopen("/sys/devices/system/cpu/online", "r");
	if (onlnf) {
		cpus = perf_cpu_map__read(onlnf);
		fclose(onlnf);
	}
	return cpus;
}

struct perf_cpu_map *perf_cpu_map__new_online_cpus(void)
{
	struct perf_cpu_map *cpus = cpu_map__new_sysfs_online();

	if (cpus)
		return cpus;

	/* Fall back to sysconf() if sysfs isn't available. */
	return cpu_map__new_sysconf();
}

static int cmp_cpu(const void *a, const void *b)
{
	const struct perf_cpu *cpu_a = a, *cpu_b = b;

	return cpu_a->cpu - cpu_b->cpu;
}

static struct perf_cpu __perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	return RC_CHK_ACCESS(cpus)->map[idx];
}

static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
{
	size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
	int i, j;

	if (cpus != NULL) {
		memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
		qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
		/* Remove duplicates */
		j = 0;
		for (i = 0; i < nr_cpus; i++) {
			if (i == 0 ||
			    __perf_cpu_map__cpu(cpus, i).cpu !=
			    __perf_cpu_map__cpu(cpus, i - 1).cpu) {
				RC_CHK_ACCESS(cpus)->map[j++].cpu =
					__perf_cpu_map__cpu(cpus, i).cpu;
			}
		}
		perf_cpu_map__set_nr(cpus, j);
		assert(j <= nr_cpus);
	}
	return cpus;
}

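/*
 * Illustrative behaviour: cpu_map__trim_new(4, {3, 1, 3, 2}) sorts and
 * de-duplicates the input, yielding the map {1, 2, 3} with nr == 3.
 */
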
struct perf_cpu_map *perf_cpu_map__read(FILE *file)
{
	struct perf_cpu_map *cpus = NULL;
	int nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;
	int n, cpu, prev;
	char sep;

	sep = 0;
	prev = -1;
	for (;;) {
		n = fscanf(file, "%u%c", &cpu, &sep);
		if (n <= 0)
			break;
		if (prev >= 0) {
			int new_max = nr_cpus + cpu - prev - 1;

			WARN_ONCE(new_max >= MAX_NR_CPUS, "Perf can support %d CPUs. "
				  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

			if (new_max >= max_entries) {
				max_entries = new_max + MAX_NR_CPUS / 2;
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto out_free_tmp;
				tmp_cpus = tmp;
			}

			/* Fill in the CPUs between the endpoints of an "a-b" range. */
			while (++prev < cpu)
				tmp_cpus[nr_cpus++].cpu = prev;
		}
		if (nr_cpus == max_entries) {
			max_entries += MAX_NR_CPUS;
			tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
			if (tmp == NULL)
				goto out_free_tmp;
			tmp_cpus = tmp;
		}

		tmp_cpus[nr_cpus++].cpu = cpu;
		if (n == 2 && sep == '-')
			prev = cpu;
		else
			prev = -1;
		if (n == 1 || sep == '\n')
			break;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
out_free_tmp:
	free(tmp_cpus);
	return cpus;
}

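/*
 * Illustrative behaviour: reading "0-2,7\n" yields the map {0, 1, 2, 7};
 * "a-b" ranges are expanded and the result is sorted/trimmed above.
 */
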
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
	struct perf_cpu_map *cpus = NULL;
	unsigned long start_cpu, end_cpu = 0;
	char *p = NULL;
	int i, nr_cpus = 0;
	struct perf_cpu *tmp_cpus = NULL, *tmp;
	int max_entries = 0;

	if (!cpu_list)
		return perf_cpu_map__new_online_cpus();

	/*
	 * Must handle the case of an empty cpumap to cover the
	 * TOPOLOGY header for NUMA nodes with no CPUs
	 * (e.g., because of CPU hotplug).
	 */
	if (!isdigit(*cpu_list) && *cpu_list != '\0')
		goto out;

	while (isdigit(*cpu_list)) {
		p = NULL;
		start_cpu = strtoul(cpu_list, &p, 0);
		if (start_cpu >= INT_MAX
		    || (*p != '\0' && *p != ',' && *p != '-'))
			goto invalid;

		if (*p == '-') {
			cpu_list = ++p;
			p = NULL;
			end_cpu = strtoul(cpu_list, &p, 0);

			if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
				goto invalid;

			if (end_cpu < start_cpu)
				goto invalid;
		} else {
			end_cpu = start_cpu;
		}

		WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. "
			  "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);

		for (; start_cpu <= end_cpu; start_cpu++) {
			/* check for duplicates */
			for (i = 0; i < nr_cpus; i++)
				if (tmp_cpus[i].cpu == (int)start_cpu)
					goto invalid;

			if (nr_cpus == max_entries) {
				max_entries += MAX_NR_CPUS;
				tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
				if (tmp == NULL)
					goto invalid;
				tmp_cpus = tmp;
			}
			tmp_cpus[nr_cpus++].cpu = (int)start_cpu;
		}
		if (*p)
			++p;

		cpu_list = p;
	}

	if (nr_cpus > 0)
		cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
	else if (*cpu_list != '\0') {
		pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.",
			   cpu_list);
		cpus = perf_cpu_map__new_online_cpus();
	} else
		cpus = perf_cpu_map__new_any_cpu();
invalid:
	free(tmp_cpus);
out:
	return cpus;
}

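/*
 * Illustrative behaviour: perf_cpu_map__new("0-2,6") yields {0, 1, 2, 6},
 * perf_cpu_map__new(NULL) falls back to the online CPUs, and
 * perf_cpu_map__new("") yields the "any CPU" (dummy) map.
 */
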
static int __perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return RC_CHK_ACCESS(cpus)->nr;
}

struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	if (cpus && idx < __perf_cpu_map__nr(cpus))
		return __perf_cpu_map__cpu(cpus, idx);

	return result;
}

int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
	return cpus ? __perf_cpu_map__nr(cpus) : 1;
}

bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map)
{
	return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
}

int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	int low, high;

	if (!cpus)
		return -1;

	low = 0;
	high = __perf_cpu_map__nr(cpus);
	while (low < high) {
		int idx = (low + high) / 2;
		struct perf_cpu cpu_at_idx = __perf_cpu_map__cpu(cpus, idx);

		if (cpu_at_idx.cpu == cpu.cpu)
			return idx;

		if (cpu_at_idx.cpu > cpu.cpu)
			high = idx;
		else
			low = idx + 1;
	}

	return -1;
}

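/*
 * The binary search above relies on the map being sorted, which
 * cpu_map__trim_new() guarantees. E.g. for the map {0, 2, 4}: the idx of
 * CPU 4 is 2, and the idx of CPU 3 is -1 (not present).
 */
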
bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
	return perf_cpu_map__idx(cpus, cpu) != -1;
}

bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, const struct perf_cpu_map *rhs)
{
	int nr;

	if (lhs == rhs)
		return true;

	if (!lhs || !rhs)
		return false;

	nr = __perf_cpu_map__nr(lhs);
	if (nr != __perf_cpu_map__nr(rhs))
		return false;

	for (int idx = 0; idx < nr; idx++) {
		if (__perf_cpu_map__cpu(lhs, idx).cpu != __perf_cpu_map__cpu(rhs, idx).cpu)
			return false;
	}
	return true;
}

bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map)
{
	return map && __perf_cpu_map__cpu(map, 0).cpu == -1;
}

struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
{
	struct perf_cpu result = {
		.cpu = -1
	};

	// The map is sorted: cpu_map__trim_new() qsort()s it and
	// cpu_map__new_sysconf() fills it in ascending order.
	return __perf_cpu_map__nr(map) > 0
		? __perf_cpu_map__cpu(map, __perf_cpu_map__nr(map) - 1)
		: result;
}

/** Is 'b' a subset of 'a'? */
bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b)
{
	if (a == b || !b)
		return true;

	if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a))
		return false;

	for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) {
		if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu)
			return false;
		if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
			j++;
			if (j == __perf_cpu_map__nr(b))
				return true;
		}
	}
	return false;
}

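/*
 * Illustrative behaviour: {1, 3} is a subset of {0, 1, 2, 3}, while
 * {1, 5} is not; a NULL 'b' counts as a subset of anything.
 */
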
/*
 * Merge two cpumaps.
 *
 * orig either gets freed and replaced with a new map, or reused
 * with no reference count change (similar to "realloc").
 * other has its reference count increased.
 */
struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
					 struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged;

	if (perf_cpu_map__is_subset(orig, other))
		return orig;
	if (perf_cpu_map__is_subset(other, orig)) {
		perf_cpu_map__put(orig);
		return perf_cpu_map__get(other);
	}

	tmp_len = __perf_cpu_map__nr(orig) + __perf_cpu_map__nr(other);
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return NULL;

	/* Standard merge of two sorted arrays; equal elements are taken once. */
	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu <= __perf_cpu_map__cpu(other, j).cpu) {
			if (__perf_cpu_map__cpu(orig, i).cpu == __perf_cpu_map__cpu(other, j).cpu)
				j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
		} else
			tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	}

	while (i < __perf_cpu_map__nr(orig))
		tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);

	while (j < __perf_cpu_map__nr(other))
		tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++);
	assert(k <= tmp_len);

	merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	perf_cpu_map__put(orig);
	return merged;
}

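/*
 * Illustrative behaviour: merging {0, 1} with {1, 2} yields {0, 1, 2} as
 * a new map. Per the comment above, 'orig' is consumed (realloc-like);
 * 'other' is never consumed, so the caller keeps its reference.
 */
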
struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
					     struct perf_cpu_map *other)
{
	struct perf_cpu *tmp_cpus;
	int tmp_len;
	int i, j, k;
	struct perf_cpu_map *merged = NULL;

	if (perf_cpu_map__is_subset(other, orig))
		return perf_cpu_map__get(orig);
	if (perf_cpu_map__is_subset(orig, other))
		return perf_cpu_map__get(other);

	tmp_len = max(__perf_cpu_map__nr(orig), __perf_cpu_map__nr(other));
	tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
	if (!tmp_cpus)
		return NULL;

	/* Keep only the CPUs present in both sorted maps. */
	i = j = k = 0;
	while (i < __perf_cpu_map__nr(orig) && j < __perf_cpu_map__nr(other)) {
		if (__perf_cpu_map__cpu(orig, i).cpu < __perf_cpu_map__cpu(other, j).cpu)
			i++;
		else if (__perf_cpu_map__cpu(orig, i).cpu > __perf_cpu_map__cpu(other, j).cpu)
			j++;
		else {
			j++;
			tmp_cpus[k++] = __perf_cpu_map__cpu(orig, i++);
		}
	}
	if (k)
		merged = cpu_map__trim_new(k, tmp_cpus);
	free(tmp_cpus);
	return merged;
}
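
/*
 * Illustrative behaviour: intersecting {0, 1, 2} with {1, 2, 3} yields the
 * new map {1, 2}; when one map is a subset of the other, the subset map is
 * returned directly with its reference count incremented.
 */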