// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2020 Intel Corporation. All rights rsvd. */

#include <linux/sched/task.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include "idxd.h"
#include "perfmon.h"

static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
			    char *buf);

static cpumask_t		perfmon_dsa_cpu_mask;
static bool			cpuhp_set_up;
static enum cpuhp_state		cpuhp_slot;

/*
 * perf userspace reads this attribute to determine which cpus to open
 * counters on.  It's connected to perfmon_dsa_cpu_mask, which is
 * maintained by the cpu hotplug handlers.
 */
static DEVICE_ATTR_RO(cpumask);

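/*
 * Illustrative example (not part of the driver): perf tooling reads
 * this mask via the PMU's sysfs directory, e.g.
 *
 *   $ cat /sys/bus/event_source/devices/dsa0/cpumask
 *
 * and opens its counting events on the CPU listed there.  The "dsa0"
 * device name here assumes a first DSA instance.
 */
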
static struct attribute *perfmon_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group cpumask_attr_group = {
	.attrs = perfmon_cpumask_attrs,
};

/*
 * These attributes specify the bits in the config word that the perf
 * syscall uses to pass the event ids and categories to perfmon.
 */
DEFINE_PERFMON_FORMAT_ATTR(event_category, "config:0-3");
DEFINE_PERFMON_FORMAT_ATTR(event, "config:4-31");

/*
 * These attributes specify the bits in the config1 word that the perf
 * syscall uses to pass filter data to perfmon.
 */
DEFINE_PERFMON_FORMAT_ATTR(filter_wq, "config1:0-31");
DEFINE_PERFMON_FORMAT_ATTR(filter_tc, "config1:32-39");
DEFINE_PERFMON_FORMAT_ATTR(filter_pgsz, "config1:40-43");
DEFINE_PERFMON_FORMAT_ATTR(filter_sz, "config1:44-51");
DEFINE_PERFMON_FORMAT_ATTR(filter_eng, "config1:52-59");

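/*
 * Illustrative example (not from this file): with the format
 * attributes above, a counting event can be opened from userspace as
 *
 *   $ perf stat -e dsa0/event_category=0x0,event=0x1,filter_wq=0x1/
 *
 * perf packs event_category into config[3:0], event into config[31:4],
 * and filter_wq into config1[31:0].  The specific category, event, and
 * filter values shown here are hypothetical.
 */
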
#define PERFMON_FILTERS_START	2
#define PERFMON_FILTERS_MAX	5

static struct attribute *perfmon_format_attrs[] = {
	&format_attr_idxd_event_category.attr,
	&format_attr_idxd_event.attr,
	&format_attr_idxd_filter_wq.attr,
	&format_attr_idxd_filter_tc.attr,
	&format_attr_idxd_filter_pgsz.attr,
	&format_attr_idxd_filter_sz.attr,
	&format_attr_idxd_filter_eng.attr,
	NULL,
};

static struct attribute_group perfmon_format_attr_group = {
	.name = "format",
	.attrs = perfmon_format_attrs,
};

static const struct attribute_group *perfmon_attr_groups[] = {
	&perfmon_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &perfmon_dsa_cpu_mask);
}

static bool is_idxd_event(struct idxd_pmu *idxd_pmu, struct perf_event *event)
{
	return &idxd_pmu->pmu == event->pmu;
}

static int perfmon_collect_events(struct idxd_pmu *idxd_pmu,
				  struct perf_event *leader,
				  bool do_grp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = idxd_pmu->n_counters;
	n = idxd_pmu->n_events;

	if (n >= max_count)
		return -EINVAL;

	if (is_idxd_event(idxd_pmu, leader)) {
		idxd_pmu->event_list[n] = leader;
		idxd_pmu->event_list[n]->hw.idx = n;
		n++;
	}

	if (!do_grp)
		return n;

	for_each_sibling_event(event, leader) {
		if (!is_idxd_event(idxd_pmu, event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		idxd_pmu->event_list[n] = event;
		idxd_pmu->event_list[n]->hw.idx = n;
		n++;
	}

	return n;
}

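/*
 * Illustrative walk-through (not from this file): with n_counters == 2
 * and a three-event group, perfmon_validate_group() below first calls
 * perfmon_collect_events(fake_pmu, leader, true); the leader takes
 * slot 0 and the first sibling slot 1, then the second sibling trips
 * the n >= max_count check, so the group is rejected with -EINVAL
 * before the real idxd_pmu state is ever touched.
 */
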
static void perfmon_assign_hw_event(struct idxd_pmu *idxd_pmu,
				    struct perf_event *event, int idx)
{
	struct idxd_device *idxd = idxd_pmu->idxd;
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = idx;
	hwc->config_base = ioread64(CNTRCFG_REG(idxd, idx));
	hwc->event_base = ioread64(CNTRCFG_REG(idxd, idx));
}

static int perfmon_assign_event(struct idxd_pmu *idxd_pmu,
				struct perf_event *event)
{
	int i;

	for (i = 0; i < IDXD_PMU_EVENT_MAX; i++)
		if (!test_and_set_bit(i, idxd_pmu->used_mask))
			return i;

	return -EINVAL;
}

/*
 * Check whether there are enough counters to satisfy that all the
 * events in the group can actually be scheduled at the same time.
 *
 * To do this, create a fake idxd_pmu object so the event collection
 * and assignment functions can be used without affecting the internal
 * state of the real idxd_pmu object.
 */
static int perfmon_validate_group(struct idxd_pmu *pmu,
				  struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct idxd_pmu *fake_pmu;
	int i, ret = 0, n, idx;

	fake_pmu = kzalloc(sizeof(*fake_pmu), GFP_KERNEL);
	if (!fake_pmu)
		return -ENOMEM;

	fake_pmu->pmu.name = pmu->pmu.name;
	fake_pmu->n_counters = pmu->n_counters;

	n = perfmon_collect_events(fake_pmu, leader, true);
	if (n < 0) {
		ret = n;
		goto out;
	}

	fake_pmu->n_events = n;
	n = perfmon_collect_events(fake_pmu, event, false);
	if (n < 0) {
		ret = n;
		goto out;
	}

	fake_pmu->n_events = n;

	for (i = 0; i < n; i++) {
		event = fake_pmu->event_list[i];

		idx = perfmon_assign_event(fake_pmu, event);
		if (idx < 0) {
			ret = idx;
			goto out;
		}
	}
out:
	kfree(fake_pmu);

	return ret;
}

static int perfmon_pmu_event_init(struct perf_event *event)
{
	struct idxd_device *idxd;
	int ret = 0;

	idxd = event_to_idxd(event);
	event->hw.idx = -1;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* sampling not supported */
	if (event->attr.sample_period)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	if (event->pmu != &idxd->idxd_pmu->pmu)
		return -EINVAL;

	event->hw.event_base = ioread64(PERFMON_TABLE_OFFSET(idxd));
	event->cpu = idxd->idxd_pmu->cpu;
	event->hw.config = event->attr.config;

	/* non-group events have themselves as leader */
	if (event->group_leader != event)
		ret = perfmon_validate_group(idxd->idxd_pmu, event);

	return ret;
}

static inline u64 perfmon_pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct idxd_device *idxd;
	int cntr = hwc->idx;

	idxd = event_to_idxd(event);

	return ioread64(CNTRDATA_REG(idxd, cntr));
}

static void perfmon_pmu_event_update(struct perf_event *event)
{
	struct idxd_device *idxd = event_to_idxd(event);
	u64 prev_raw_count, new_raw_count, delta, p, n;
	int shift = 64 - idxd->idxd_pmu->counter_width;
	struct hw_perf_event *hwc = &event->hw;

	prev_raw_count = local64_read(&hwc->prev_count);
	do {
		new_raw_count = perfmon_pmu_read_counter(event);
	} while (!local64_try_cmpxchg(&hwc->prev_count,
				      &prev_raw_count, new_raw_count));
	n = (new_raw_count << shift);
	p = (prev_raw_count << shift);

	delta = ((n - p) >> shift);

	local64_add(delta, &event->count);
}

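/*
 * Worked example (illustrative): with counter_width == 32 the shift is
 * 32, so a wrap from prev_raw_count == 0xffffffff to
 * new_raw_count == 0x1 gives
 *
 *   n - p == (0x1 << 32) - (0xffffffff << 32) == 0x2 << 32  (mod 2^64)
 *
 * and (n - p) >> 32 == 2.  The shifts discard the bits above the
 * counter width, so the unsigned subtraction yields the correct delta
 * across the wraparound.
 */
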
void perfmon_counter_overflow(struct idxd_device *idxd)
{
	int i, n_counters, max_loop = OVERFLOW_SIZE;
	struct perf_event *event;
	unsigned long ovfstatus;

	n_counters = min(idxd->idxd_pmu->n_counters, OVERFLOW_SIZE);

	ovfstatus = ioread32(OVFSTATUS_REG(idxd));

	/*
	 * While updating overflowed counters, other counters behind
	 * them could overflow and be missed in a given pass.
	 * Normally this could happen at most n_counters times, but in
	 * theory a tiny counter width could result in continual
	 * overflows and endless looping.  max_loop provides a
	 * failsafe in that highly unlikely case.
	 */
	while (ovfstatus && max_loop--) {
		/* Figure out which counter(s) overflowed */
		for_each_set_bit(i, &ovfstatus, n_counters) {
			unsigned long ovfstatus_clear = 0;

			/* Update event->count for overflowed counter */
			event = idxd->idxd_pmu->event_list[i];
			perfmon_pmu_event_update(event);
			/* Writing 1 to OVFSTATUS bit clears it */
			set_bit(i, &ovfstatus_clear);
			iowrite32(ovfstatus_clear, OVFSTATUS_REG(idxd));
		}

		ovfstatus = ioread32(OVFSTATUS_REG(idxd));
	}

	/*
	 * Should never happen.  If so, it means a counter(s) looped
	 * around twice while this handler was running.
	 */
	WARN_ON_ONCE(ovfstatus);
}

static inline void perfmon_reset_config(struct idxd_device *idxd)
{
	iowrite32(CONFIG_RESET, PERFRST_REG(idxd));
	iowrite32(0, OVFSTATUS_REG(idxd));
	iowrite32(0, PERFFRZ_REG(idxd));
}

static inline void perfmon_reset_counters(struct idxd_device *idxd)
{
	iowrite32(CNTR_RESET, PERFRST_REG(idxd));
}

static inline void perfmon_reset(struct idxd_device *idxd)
{
	perfmon_reset_config(idxd);
	perfmon_reset_counters(idxd);
}

static void perfmon_pmu_event_start(struct perf_event *event, int mode)
{
	u32 flt_wq, flt_tc, flt_pg_sz, flt_xfer_sz, flt_eng = 0;
	u64 cntr_cfg, cntrdata, event_enc, event_cat = 0;
	struct hw_perf_event *hwc = &event->hw;
	union filter_cfg flt_cfg;
	union event_cfg event_cfg;
	struct idxd_device *idxd;
	int cntr;

	idxd = event_to_idxd(event);

	event->hw.idx = hwc->idx;
	cntr = hwc->idx;

	/* Obtain event category and event value from user space */
	event_cfg.val = event->attr.config;
	flt_cfg.val = event->attr.config1;
	event_cat = event_cfg.event_cat;
	event_enc = event_cfg.event_enc;

	/* Obtain filter configuration from user space */
	flt_wq = flt_cfg.wq;
	flt_tc = flt_cfg.tc;
	flt_pg_sz = flt_cfg.pg_sz;
	flt_xfer_sz = flt_cfg.xfer_sz;
	flt_eng = flt_cfg.eng;

	if (flt_wq && test_bit(FLT_WQ, &idxd->idxd_pmu->supported_filters))
		iowrite32(flt_wq, FLTCFG_REG(idxd, cntr, FLT_WQ));
	if (flt_tc && test_bit(FLT_TC, &idxd->idxd_pmu->supported_filters))
		iowrite32(flt_tc, FLTCFG_REG(idxd, cntr, FLT_TC));
	if (flt_pg_sz && test_bit(FLT_PG_SZ, &idxd->idxd_pmu->supported_filters))
		iowrite32(flt_pg_sz, FLTCFG_REG(idxd, cntr, FLT_PG_SZ));
	if (flt_xfer_sz && test_bit(FLT_XFER_SZ, &idxd->idxd_pmu->supported_filters))
		iowrite32(flt_xfer_sz, FLTCFG_REG(idxd, cntr, FLT_XFER_SZ));
	if (flt_eng && test_bit(FLT_ENG, &idxd->idxd_pmu->supported_filters))
		iowrite32(flt_eng, FLTCFG_REG(idxd, cntr, FLT_ENG));

	/* Read the start value */
	cntrdata = ioread64(CNTRDATA_REG(idxd, cntr));
	local64_set(&event->hw.prev_count, cntrdata);

	/* Set counter to event/category */
	cntr_cfg = event_cat << CNTRCFG_CATEGORY_SHIFT;
	cntr_cfg |= event_enc << CNTRCFG_EVENT_SHIFT;
	/* Set interrupt on overflow and counter enable bits */
	cntr_cfg |= (CNTRCFG_IRQ_OVERFLOW | CNTRCFG_ENABLE);

	iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));
}

static void perfmon_pmu_event_stop(struct perf_event *event, int mode)
{
	struct hw_perf_event *hwc = &event->hw;
	struct idxd_device *idxd;
	int i, cntr = hwc->idx;
	u64 cntr_cfg;

	idxd = event_to_idxd(event);

	/* remove this event from event list */
	for (i = 0; i < idxd->idxd_pmu->n_events; i++) {
		if (event != idxd->idxd_pmu->event_list[i])
			continue;

		for (++i; i < idxd->idxd_pmu->n_events; i++)
			idxd->idxd_pmu->event_list[i - 1] = idxd->idxd_pmu->event_list[i];
		--idxd->idxd_pmu->n_events;
		break;
	}

	cntr_cfg = ioread64(CNTRCFG_REG(idxd, cntr));
	cntr_cfg &= ~CNTRCFG_ENABLE;
	iowrite64(cntr_cfg, CNTRCFG_REG(idxd, cntr));

	if (mode == PERF_EF_UPDATE)
		perfmon_pmu_event_update(event);

	event->hw.idx = -1;
	clear_bit(cntr, idxd->idxd_pmu->used_mask);
}

static void perfmon_pmu_event_del(struct perf_event *event, int mode)
{
	perfmon_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int perfmon_pmu_event_add(struct perf_event *event, int flags)
{
	struct idxd_device *idxd = event_to_idxd(event);
	struct idxd_pmu *idxd_pmu = idxd->idxd_pmu;
	struct hw_perf_event *hwc = &event->hw;
	int idx, n;

	n = perfmon_collect_events(idxd_pmu, event, false);
	if (n < 0)
		return n;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	idx = perfmon_assign_event(idxd_pmu, event);
	if (idx < 0)
		return idx;

	perfmon_assign_hw_event(idxd_pmu, event, idx);

	if (flags & PERF_EF_START)
		perfmon_pmu_event_start(event, 0);

	idxd_pmu->n_events = n;

	return 0;
}

static void enable_perfmon_pmu(struct idxd_device *idxd)
{
	iowrite32(COUNTER_UNFREEZE, PERFFRZ_REG(idxd));
}

static void disable_perfmon_pmu(struct idxd_device *idxd)
{
	iowrite32(COUNTER_FREEZE, PERFFRZ_REG(idxd));
}

static void perfmon_pmu_enable(struct pmu *pmu)
{
	struct idxd_device *idxd = pmu_to_idxd(pmu);

	enable_perfmon_pmu(idxd);
}

static void perfmon_pmu_disable(struct pmu *pmu)
{
	struct idxd_device *idxd = pmu_to_idxd(pmu);

	disable_perfmon_pmu(idxd);
}

static void skip_filter(int i)
{
	int j;

	for (j = i; j < PERFMON_FILTERS_MAX; j++)
		perfmon_format_attrs[PERFMON_FILTERS_START + j] =
			perfmon_format_attrs[PERFMON_FILTERS_START + j + 1];
}

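/*
 * Illustrative example (not from this file): if only FLT_TC is
 * unsupported, skip_filter(1) shifts every entry after filter_tc left
 * by one, compacting perfmon_format_attrs[] from
 *
 *   { event_category, event, wq, tc, pgsz, sz, eng, NULL }
 * to
 *   { event_category, event, wq, pgsz, sz, eng, NULL }
 *
 * so the unsupported filter never appears under the sysfs format/
 * directory.
 */
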
static void idxd_pmu_init(struct idxd_pmu *idxd_pmu)
{
	int i;

	for (i = 0; i < PERFMON_FILTERS_MAX; i++) {
		if (!test_bit(i, &idxd_pmu->supported_filters))
			skip_filter(i);
	}

	idxd_pmu->pmu.name = idxd_pmu->name;
	idxd_pmu->pmu.attr_groups = perfmon_attr_groups;
	idxd_pmu->pmu.task_ctx_nr = perf_invalid_context;
	idxd_pmu->pmu.event_init = perfmon_pmu_event_init;
	idxd_pmu->pmu.pmu_enable = perfmon_pmu_enable;
	idxd_pmu->pmu.pmu_disable = perfmon_pmu_disable;
	idxd_pmu->pmu.add = perfmon_pmu_event_add;
	idxd_pmu->pmu.del = perfmon_pmu_event_del;
	idxd_pmu->pmu.start = perfmon_pmu_event_start;
	idxd_pmu->pmu.stop = perfmon_pmu_event_stop;
	idxd_pmu->pmu.read = perfmon_pmu_event_update;
	idxd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
	idxd_pmu->pmu.module = THIS_MODULE;
}

void perfmon_pmu_remove(struct idxd_device *idxd)
{
	if (!idxd->idxd_pmu)
		return;

	cpuhp_state_remove_instance(cpuhp_slot, &idxd->idxd_pmu->cpuhp_node);
	perf_pmu_unregister(&idxd->idxd_pmu->pmu);
	kfree(idxd->idxd_pmu);
	idxd->idxd_pmu = NULL;
}

static int perf_event_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct idxd_pmu *idxd_pmu;

	idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node);

	/* select the first online CPU as the designated reader */
	if (cpumask_empty(&perfmon_dsa_cpu_mask)) {
		cpumask_set_cpu(cpu, &perfmon_dsa_cpu_mask);
		idxd_pmu->cpu = cpu;
	}

	return 0;
}

static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct idxd_pmu *idxd_pmu;
	unsigned int target;

	idxd_pmu = hlist_entry_safe(node, typeof(*idxd_pmu), cpuhp_node);

	if (!cpumask_test_and_clear_cpu(cpu, &perfmon_dsa_cpu_mask))
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	/* migrate events if there is a valid target */
	if (target < nr_cpu_ids) {
		cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
		perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
	}

	return 0;
}

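/*
 * Note (illustrative): when the designated reader CPU goes offline,
 * cpumask_any_but() picks any other online CPU; the sysfs cpumask then
 * advertises the new reader and perf_pmu_migrate_context() moves the
 * active events to it, so counting continues without userspace
 * intervention.  If no other CPU is online, the mask is simply left
 * cleared.
 */
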
int perfmon_pmu_init(struct idxd_device *idxd)
{
	union idxd_perfcap perfcap;
	struct idxd_pmu *idxd_pmu;
	int rc = -ENODEV;

	/*
	 * perfmon module initialization failed, nothing to do
	 */
	if (!cpuhp_set_up)
		return -ENODEV;

	/*
	 * If perfmon_offset or num_counters is 0, it means perfmon is
	 * not supported on this hardware.
	 */
	if (idxd->perfmon_offset == 0)
		return -ENODEV;

	idxd_pmu = kzalloc(sizeof(*idxd_pmu), GFP_KERNEL);
	if (!idxd_pmu)
		return -ENOMEM;

	idxd_pmu->idxd = idxd;
	idxd->idxd_pmu = idxd_pmu;

	if (idxd->data->type == IDXD_TYPE_DSA) {
		rc = sprintf(idxd_pmu->name, "dsa%d", idxd->id);
		if (rc < 0)
			goto free;
	} else if (idxd->data->type == IDXD_TYPE_IAX) {
		rc = sprintf(idxd_pmu->name, "iax%d", idxd->id);
		if (rc < 0)
			goto free;
	} else {
		goto free;
	}

	/*
	 * sprintf() returns the (positive) name length on success;
	 * reset rc so the capability-check failure paths below return
	 * an error rather than that length.
	 */
	rc = -ENODEV;

	perfmon_reset(idxd);

	perfcap.bits = ioread64(PERFCAP_REG(idxd));

	/*
	 * If total perf counter is 0, stop further registration.
	 * This is necessary in order to support driver running on
	 * guest which does not have pmon support.
	 */
	if (perfcap.num_perf_counter == 0)
		goto free;

	/* A counter width of 0 means it can't count */
	if (perfcap.counter_width == 0)
		goto free;

	/* Overflow interrupt and counter freeze support must be available */
	if (!perfcap.overflow_interrupt || !perfcap.counter_freeze)
		goto free;

	/* Number of event categories cannot be 0 */
	if (perfcap.num_event_category == 0)
		goto free;

	/*
	 * We don't support per-counter capabilities for now.
	 */
	if (perfcap.cap_per_counter)
		goto free;

	idxd_pmu->n_event_categories = perfcap.num_event_category;
	idxd_pmu->supported_event_categories = perfcap.global_event_category;
	idxd_pmu->per_counter_caps_supported = perfcap.cap_per_counter;

	/* check filter capability.  If 0, then filters are not supported */
	idxd_pmu->supported_filters = perfcap.filter;
	if (perfcap.filter)
		idxd_pmu->n_filters = hweight8(perfcap.filter);

	/* Store the total number of counters, categories, and counter width */
	idxd_pmu->n_counters = perfcap.num_perf_counter;
	idxd_pmu->counter_width = perfcap.counter_width;

	idxd_pmu_init(idxd_pmu);

	rc = perf_pmu_register(&idxd_pmu->pmu, idxd_pmu->name, -1);
	if (rc)
		goto free;

	rc = cpuhp_state_add_instance(cpuhp_slot, &idxd_pmu->cpuhp_node);
	if (rc) {
		perf_pmu_unregister(&idxd->idxd_pmu->pmu);
		goto free;
	}
out:
	return rc;

free:
	kfree(idxd_pmu);
	idxd->idxd_pmu = NULL;

	goto out;
}

void __init perfmon_init(void)
{
	int rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
					 "driver/dma/idxd/perf:online",
					 perf_event_cpu_online,
					 perf_event_cpu_offline);
	if (rc < 0)
		return;

	cpuhp_slot = rc;
	cpuhp_set_up = true;
}

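/*
 * Note (illustrative): with CPUHP_AP_ONLINE_DYN,
 * cpuhp_setup_state_multi() returns a dynamically allocated hotplug
 * state number on success.  It is saved in cpuhp_slot so each device's
 * perfmon_pmu_init() can attach its cpuhp_node to the same state via
 * cpuhp_state_add_instance(), and cpuhp_set_up gates all later use.
 */
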
void __exit perfmon_exit(void)
{
	if (cpuhp_set_up)
		cpuhp_remove_multi_state(cpuhp_slot);
}