// SPDX-License-Identifier: GPL-2.0
#include <api/fs/fs.h>
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include <internal/cpumap.h>

static struct perf_cpu max_cpu_num;
static struct perf_cpu max_present_cpu_num;
static int max_node_num;
/*
 * The numa node X as read from /sys/devices/system/node/nodeX indexed by the
 * CPU number.
 */
static int *cpunode_map;
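/*
 * Example (editor's illustration, not in the original file): after a
 * successful cpu__setup_cpunode_map(), cpunode_map[12] == 1 means cpu 12
 * sits on numa node 1; entries for cpus with no node directory stay -1.
 */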
bool perf_record_cpu_map_data__test_bit(int i,
					const struct perf_record_cpu_map_data *data)
{
	int bit_word32 = i / 32;
	__u32 bit_mask32 = 1U << (i & 31);
	int bit_word64 = i / 64;
	__u64 bit_mask64 = ((__u64)1) << (i & 63);

	return (data->mask32_data.long_size == 4)
		? (bit_word32 < data->mask32_data.nr) &&
		(data->mask32_data.mask[bit_word32] & bit_mask32) != 0
		: (bit_word64 < data->mask64_data.nr) &&
		(data->mask64_data.mask[bit_word64] & bit_mask64) != 0;
}
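/*
 * Example (editor's illustration): with long_size == 4 and i == 37, the
 * bit lives in 32-bit word 37 / 32 == 1 under mask 1U << (37 & 31) == 0x20,
 * so the test reads data->mask32_data.mask[1] & 0x20 after checking that
 * word 1 is within mask32_data.nr.
 */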
/* Read ith mask value from data into the given 64-bit sized bitmap */
static void perf_record_cpu_map_data__read_one_mask(const struct perf_record_cpu_map_data *data,
						    int i, unsigned long *bitmap)
{
#if __SIZEOF_LONG__ == 8
	if (data->mask32_data.long_size == 4)
		bitmap[0] = data->mask32_data.mask[i];
	else
		bitmap[0] = data->mask64_data.mask[i];
#else
	if (data->mask32_data.long_size == 4) {
		bitmap[0] = data->mask32_data.mask[i];
		bitmap[1] = 0;
	} else {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
		bitmap[0] = (unsigned long)(data->mask64_data.mask[i] >> 32);
		bitmap[1] = (unsigned long)data->mask64_data.mask[i];
#else
		bitmap[0] = (unsigned long)data->mask64_data.mask[i];
		bitmap[1] = (unsigned long)(data->mask64_data.mask[i] >> 32);
#endif
	}
#endif
}
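/*
 * Example (editor's illustration): on a 32-bit little-endian build with
 * long_size == 8 and mask64_data.mask[i] == 0x8000000000000001, the split
 * above yields bitmap[0] == 0x00000001 (low word) and
 * bitmap[1] == 0x80000000 (high word), i.e. bits 0 and 63 set.
 */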
static struct perf_cpu_map *cpu_map__from_entries(const struct perf_record_cpu_map_data *data)
{
	struct perf_cpu_map *map;

	map = perf_cpu_map__empty_new(data->cpus_data.nr);
	if (map) {
		unsigned int i;

		for (i = 0; i < data->cpus_data.nr; i++) {
			/*
			 * Special treatment for -1, which is not a real cpu
			 * number, and we need to use (int) -1 to initialize
			 * map[i], otherwise it would become 65535.
			 */
			if (data->cpus_data.cpu[i] == (u16) -1)
				RC_CHK_ACCESS(map)->map[i].cpu = -1;
			else
				RC_CHK_ACCESS(map)->map[i].cpu = (int) data->cpus_data.cpu[i];
		}
	}

	return map;
}
static struct perf_cpu_map *cpu_map__from_mask(const struct perf_record_cpu_map_data *data)
{
	DECLARE_BITMAP(local_copy, 64);
	int weight = 0, mask_nr = data->mask32_data.nr;
	struct perf_cpu_map *map;

	/* First pass: count the set bits to size the map. */
	for (int i = 0; i < mask_nr; i++) {
		perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
		weight += bitmap_weight(local_copy, 64);
	}

	map = perf_cpu_map__empty_new(weight);
	if (!map)
		return NULL;

	/* Second pass: fill in the cpu numbers. */
	for (int i = 0, j = 0; i < mask_nr; i++) {
		int cpus_per_i = (i * data->mask32_data.long_size * BITS_PER_BYTE);
		int cpu;

		perf_record_cpu_map_data__read_one_mask(data, i, local_copy);
		for_each_set_bit(cpu, local_copy, 64)
			RC_CHK_ACCESS(map)->map[j++].cpu = cpu + cpus_per_i;
	}
	return map;
}
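/*
 * Example (editor's illustration): with long_size == 4 and mask words
 * { 0x3, 0x1 }, the first pass counts weight == 3 and the second pass
 * emits cpus 0 and 1 from word 0 plus cpu 0 + 1 * 32 == 32 from word 1,
 * giving the map { 0, 1, 32 }.
 */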
static struct perf_cpu_map *cpu_map__from_range(const struct perf_record_cpu_map_data *data)
{
	struct perf_cpu_map *map;
	unsigned int i = 0;

	map = perf_cpu_map__empty_new(data->range_cpu_data.end_cpu -
				data->range_cpu_data.start_cpu + 1 + data->range_cpu_data.any_cpu);
	if (!map)
		return NULL;

	if (data->range_cpu_data.any_cpu)
		RC_CHK_ACCESS(map)->map[i++].cpu = -1;

	for (int cpu = data->range_cpu_data.start_cpu; cpu <= data->range_cpu_data.end_cpu;
	     i++, cpu++)
		RC_CHK_ACCESS(map)->map[i].cpu = cpu;

	return map;
}
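/*
 * Example (editor's illustration): start_cpu == 2, end_cpu == 5 and
 * any_cpu == 1 allocate 5 - 2 + 1 + 1 == 5 entries and fill them as
 * { -1, 2, 3, 4, 5 }.
 */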
struct perf_cpu_map *cpu_map__new_data(const struct perf_record_cpu_map_data *data)
{
	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		return cpu_map__from_entries(data);
	case PERF_CPU_MAP__MASK:
		return cpu_map__from_mask(data);
	case PERF_CPU_MAP__RANGE_CPUS:
		return cpu_map__from_range(data);
	default:
		pr_err("cpu_map__new_data unknown type %d\n", data->type);
		return NULL;
	}
}
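/*
 * Usage sketch (editor's illustration; "event" is a hypothetical
 * struct perf_record_cpu_map pointer, not defined in this file):
 *
 *	struct perf_cpu_map *map = cpu_map__new_data(&event->data);
 *
 *	if (map) {
 *		cpu_map__fprintf(map, stdout);
 *		perf_cpu_map__put(map);
 *	}
 */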
size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp)
{
	char buf[1024];

	cpu_map__snprint(map, buf, sizeof(buf));
	return fprintf(fp, "%s\n", buf);
}
struct perf_cpu_map *perf_cpu_map__empty_new(int nr)
{
	struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr);

	if (cpus != NULL) {
		for (int i = 0; i < nr; i++)
			RC_CHK_ACCESS(cpus)->map[i].cpu = -1;
	}
	return cpus;
}
struct cpu_aggr_map *cpu_aggr_map__empty_new(int nr)
{
	struct cpu_aggr_map *cpus = malloc(sizeof(*cpus) + sizeof(struct aggr_cpu_id) * nr);

	if (cpus != NULL) {
		int i;

		cpus->nr = nr;
		for (i = 0; i < nr; i++)
			cpus->map[i] = aggr_cpu_id__empty();

		refcount_set(&cpus->refcnt, 1);
	}
	return cpus;
}
static int cpu__get_topology_int(int cpu, const char *name, int *value)
{
	char path[PATH_MAX];

	snprintf(path, PATH_MAX,
		"devices/system/cpu/cpu%d/topology/%s", cpu, name);

	return sysfs__read_int(path, value);
}
int cpu__get_socket_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "physical_package_id", &value);

	return ret ?: value;
}
struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.socket = cpu__get_socket_id(cpu);
	return id;
}
static int aggr_cpu_id__cmp(const void *a_pointer, const void *b_pointer)
{
	struct aggr_cpu_id *a = (struct aggr_cpu_id *)a_pointer;
	struct aggr_cpu_id *b = (struct aggr_cpu_id *)b_pointer;

	if (a->node != b->node)
		return a->node - b->node;
	else if (a->socket != b->socket)
		return a->socket - b->socket;
	else if (a->die != b->die)
		return a->die - b->die;
	else if (a->core != b->core)
		return a->core - b->core;
	else
		return a->thread_idx - b->thread_idx;
}
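/*
 * Note (editor's illustration): the comparison is lexicographic over
 * (node, socket, die, core, thread_idx), so for example an id with
 * node 0 and socket 1 sorts before an id with node 1 and socket 0.
 */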
struct cpu_aggr_map *cpu_aggr_map__new(const struct perf_cpu_map *cpus,
				       aggr_cpu_id_get_t get_id,
				       void *data, bool needs_sort)
{
	int idx;
	struct perf_cpu cpu;
	struct cpu_aggr_map *c = cpu_aggr_map__empty_new(perf_cpu_map__nr(cpus));

	if (!c)
		return NULL;

	/* Reset size as it may only be partially filled */
	c->nr = 0;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
		bool duplicate = false;
		struct aggr_cpu_id cpu_id = get_id(cpu, data);

		for (int j = 0; j < c->nr; j++) {
			if (aggr_cpu_id__equal(&cpu_id, &c->map[j])) {
				duplicate = true;
				break;
			}
		}
		if (!duplicate) {
			c->map[c->nr] = cpu_id;
			c->nr++;
		}
	}
	/* Trim the map to the number of unique ids; a failed realloc is harmless. */
	if (c->nr != perf_cpu_map__nr(cpus)) {
		struct cpu_aggr_map *trimmed_c =
			realloc(c,
				sizeof(struct cpu_aggr_map) + sizeof(struct aggr_cpu_id) * c->nr);

		if (trimmed_c)
			c = trimmed_c;
	}

	/* ensure we process ids in increasing order */
	if (needs_sort)
		qsort(c->map, c->nr, sizeof(struct aggr_cpu_id), aggr_cpu_id__cmp);

	return c;
}
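/*
 * Usage sketch (editor's illustration): building one aggregation entry
 * per socket from an existing cpu map, sorted for stable output:
 *
 *	struct cpu_aggr_map *sockets =
 *		cpu_aggr_map__new(cpus, aggr_cpu_id__socket, NULL, true);
 */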
int cpu__get_die_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "die_id", &value);

	return ret ?: value;
}
struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;
	int die;

	die = cpu__get_die_id(cpu);
	/* There is no die_id on legacy system. */
	if (die == -1)
		die = 0;

	/*
	 * die_id is relative to socket, so start
	 * with the socket ID and then add die to
	 * make a unique ID.
	 */
	id = aggr_cpu_id__socket(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	id.die = die;
	return id;
}
int cpu__get_core_id(struct perf_cpu cpu)
{
	int value, ret = cpu__get_topology_int(cpu.cpu, "core_id", &value);

	return ret ?: value;
}
struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;
	int core = cpu__get_core_id(cpu);

	/* aggr_cpu_id__die returns a struct with socket and die set. */
	id = aggr_cpu_id__die(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	/*
	 * core_id is relative to socket and die, we need a global id.
	 * So we combine the result from aggr_cpu_id__die with the core id.
	 */
	id.core = core;
	return id;
}
struct aggr_cpu_id aggr_cpu_id__cpu(struct perf_cpu cpu, void *data)
{
	struct aggr_cpu_id id;

	/* aggr_cpu_id__core returns a struct with socket, die and core set. */
	id = aggr_cpu_id__core(cpu, data);
	if (aggr_cpu_id__is_empty(&id))
		return id;

	id.cpu = cpu;
	return id;
}
struct aggr_cpu_id aggr_cpu_id__node(struct perf_cpu cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	id.node = cpu__get_node(cpu);
	return id;
}
struct aggr_cpu_id aggr_cpu_id__global(struct perf_cpu cpu, void *data __maybe_unused)
{
	struct aggr_cpu_id id = aggr_cpu_id__empty();

	/* it always aggregates to cpu 0 */
	cpu.cpu = 0;
	id.cpu = cpu;
	return id;
}
/* setup simple routines to easily access node numbers given a cpu number */
static int get_max_num(char *path, int *max)
{
	size_t num;
	char *buf;
	int err = 0;

	if (filename__read_str(path, &buf, &num))
		return -1;

	buf[num] = '\0';

	/* start on the right, to find highest node num */
	while (--num) {
		if ((buf[num] == ',') || (buf[num] == '-')) {
			num++;
			break;
		}
	}
	if (sscanf(&buf[num], "%d", max) < 1) {
		err = -1;
		goto out;
	}

	/* convert from 0-based to 1-based */
	(*max)++;

out:
	free(buf);
	return err;
}
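/*
 * Example (editor's illustration): a sysfs "possible" file containing
 * "0-63" is scanned from the right until the '-' is found, "63" is
 * parsed as the highest 0-based number, and incremented to a count of 64.
 */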
/* Determine highest possible cpu in the system for sparse allocation */
static void set_max_cpu_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_cpu_num.cpu = 4096;
	max_present_cpu_num.cpu = 4096;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_cpu_num.cpu);
	if (ret)
		goto out;

	/* get the highest present cpu number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_present_cpu_num.cpu);

out:
	if (ret)
		pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num.cpu);
}
/* Determine highest possible node in the system for sparse allocation */
static void set_max_node_num(void)
{
	const char *mnt;
	char path[PATH_MAX];
	int ret = -1;

	/* set up default */
	max_node_num = 8;

	mnt = sysfs__mountpoint();
	if (!mnt)
		goto out;

	/* get the highest possible node number for a sparse allocation */
	ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
	if (ret >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		goto out;
	}

	ret = get_max_num(path, &max_node_num);

out:
	if (ret)
		pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}
int cpu__max_node(void)
{
	if (unlikely(!max_node_num))
		set_max_node_num();

	return max_node_num;
}

struct perf_cpu cpu__max_cpu(void)
{
	if (unlikely(!max_cpu_num.cpu))
		set_max_cpu_num();

	return max_cpu_num;
}

struct perf_cpu cpu__max_present_cpu(void)
{
	if (unlikely(!max_present_cpu_num.cpu))
		set_max_cpu_num();

	return max_present_cpu_num;
}
int cpu__get_node(struct perf_cpu cpu)
{
	if (unlikely(cpunode_map == NULL)) {
		pr_debug("cpu_map not initialized\n");
		return -1;
	}

	return cpunode_map[cpu.cpu];
}
static int init_cpunode_map(void)
{
	int i;

	set_max_cpu_num();
	set_max_node_num();

	cpunode_map = calloc(max_cpu_num.cpu, sizeof(int));
	if (!cpunode_map) {
		pr_err("%s: calloc failed\n", __func__);
		return -1;
	}

	for (i = 0; i < max_cpu_num.cpu; i++)
		cpunode_map[i] = -1;

	return 0;
}
int cpu__setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];
	char path[PATH_MAX];
	const char *mnt;
	int n;

	/* initialize globals */
	if (init_cpunode_map())
		return -1;

	mnt = sysfs__mountpoint();
	if (!mnt)
		return 0;

	n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
	if (n >= PATH_MAX) {
		pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
		return -1;
	}

	dir1 = opendir(path);
	if (!dir1)
		return 0;

	/* walk tree and setup map */
	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
		if (n >= PATH_MAX) {
			pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
			continue;
		}

		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
	return 0;
}
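/*
 * Usage sketch (editor's illustration; "node" is a hypothetical local):
 *
 *	if (!cpu__setup_cpunode_map())
 *		node = cpu__get_node((struct perf_cpu){ .cpu = 0 });
 */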
size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size)
{
	int i, start = -1;
	bool first = true;
	size_t ret = 0;

#define COMMA first ? "" : ","

	for (i = 0; i < perf_cpu_map__nr(map) + 1; i++) {
		struct perf_cpu cpu = { .cpu = INT_MAX };
		bool last = i == perf_cpu_map__nr(map);

		if (!last)
			cpu = perf_cpu_map__cpu(map, i);

		if (start == -1) {
			start = i;
			if (last) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						perf_cpu_map__cpu(map, i).cpu);
			}
		} else if (((i - start) != (cpu.cpu - perf_cpu_map__cpu(map, start).cpu)) || last) {
			int end = i - 1;

			if (start == end) {
				ret += snprintf(buf + ret, size - ret,
						"%s%d", COMMA,
						perf_cpu_map__cpu(map, start).cpu);
			} else {
				ret += snprintf(buf + ret, size - ret,
						"%s%d-%d", COMMA,
						perf_cpu_map__cpu(map, start).cpu, perf_cpu_map__cpu(map, end).cpu);
			}
			first = false;
			start = i;
		}
	}

#undef COMMA

	pr_debug2("cpumask list: %s\n", buf);
	return ret;
}
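/*
 * Example (editor's illustration): a map holding cpus { 0, 1, 2, 4 }
 * prints as "0-2,4": the run 0-2 is flushed when cpu 4 breaks the
 * consecutive sequence, and the singleton 4 is flushed on the final
 * ("last") iteration.
 */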
static char hex_char(unsigned char val)
{
	if (val < 10)
		return val + '0';
	if (val < 16)
		return val - 10 + 'a';
	return '?';
}
size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
{
	int i, cpu;
	char *ptr = buf;
	unsigned char *bitmap;
	struct perf_cpu last_cpu = perf_cpu_map__cpu(map, perf_cpu_map__nr(map) - 1);

	if (buf == NULL)
		return 0;

	bitmap = zalloc(last_cpu.cpu / 8 + 1);
	if (bitmap == NULL) {
		buf[0] = '\0';
		return 0;
	}

	for (i = 0; i < perf_cpu_map__nr(map); i++) {
		cpu = perf_cpu_map__cpu(map, i).cpu;
		bitmap[cpu / 8] |= 1 << (cpu % 8);
	}

	/* Emit one hex digit per nibble, highest cpus first. */
	for (cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) {
		unsigned char bits = bitmap[cpu / 8];

		if (cpu % 8)
			bits >>= 4;
		else
			bits &= 0xf;

		*ptr++ = hex_char(bits);
		if ((cpu % 32) == 0 && cpu > 0)
			*ptr++ = ',';
	}
	*ptr = '\0';
	free(bitmap);

	buf[size - 1] = '\0';
	return ptr - buf;
}
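/*
 * Example (editor's illustration): a map holding cpus { 0, 1, 2, 3 }
 * sets bitmap[0] to 0x0f and the nibble loop emits the single hex
 * character "f"; cpus { 0 .. 7 } would emit "ff".
 */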
const struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
{
	static const struct perf_cpu_map *online = NULL;

	if (!online)
		online = perf_cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */

	return online;
}
bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b)
{
	return a->thread_idx == b->thread_idx &&
		a->node == b->node &&
		a->socket == b->socket &&
		a->die == b->die &&
		a->core == b->core &&
		a->cpu.cpu == b->cpu.cpu;
}
bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a)
{
	return a->thread_idx == -1 &&
		a->node == -1 &&
		a->socket == -1 &&
		a->die == -1 &&
		a->core == -1 &&
		a->cpu.cpu == -1;
}
struct aggr_cpu_id aggr_cpu_id__empty(void)
{
	struct aggr_cpu_id ret = {
		.thread_idx = -1,
		.node = -1,
		.socket = -1,
		.die = -1,
		.core = -1,
		.cpu = (struct perf_cpu){ .cpu = -1 },
	};
	return ret;
}