// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support - Processor Activity Instrumentation Extension
 *
 * Copyright IBM Corp. 2022
 */
#define KMSG_COMPONENT	"pai_ext"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <asm/ctlreg.h>
#include <asm/pai.h>
#include <asm/debug.h>
#define PAIE1_CB_SZ		0x200	/* Size of PAIE1 control block */
#define PAIE1_CTRBLOCK_SZ	0x400	/* Size of PAIE1 counter blocks */
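/* PAIE1_CB_SZ matches the 512 byte struct paiext_cb defined below;
 * PAIE1_CTRBLOCK_SZ is the 1 KB counter area referenced by paiext_cb->acc.
 * Both sizes and their alignment requirements are described in
 * paiext_alloc().
 */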
static debug_info_t *paiext_dbg;
static unsigned int paiext_cnt;	/* Extracted with QPACI instruction */
/* Create the PAI extension 1 control block area.
 * The PAI extension control block 1 is pointed to by lowcore
 * address 0x1508 for each CPU. This control block is 512 bytes in size
 * and requires a 512 byte boundary alignment.
 */
struct paiext_cb {		/* PAI extension 1 control block */
	u64 header;		/* Not used */
	u64 reserved1;
	u64 acc;		/* Addr to analytics counter control block */
	u8 reserved2[488];	/* Pad to 512 bytes */
} __packed;
struct paiext_map {
	unsigned long *area;		/* Area for CPU to store counters */
	struct pai_userdata *save;	/* Area to store non-zero counters */
	enum paievt_mode mode;		/* Type of event */
	unsigned int active_events;	/* # of PAI Extension users */
	refcount_t refcnt;		/* Reference count */
	struct perf_event *event;	/* Perf event for sampling */
	struct paiext_cb *paiext_cb;	/* PAI extension control block area */
};
struct paiext_mapptr {
	struct paiext_map *mapptr;
};
static struct paiext_root {		/* Anchor to per CPU data */
	refcount_t refcnt;		/* Overall active events */
	struct paiext_mapptr __percpu *mapptr;
} paiext_root;
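/* Data layout: the global paiext_root anchors one struct paiext_mapptr per
 * possible CPU; each of these points to the CPU's struct paiext_map, which
 * owns the counter area, the PAIE1 control block and the save buffer used
 * for sampling.
 */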
/* Free per CPU data when the last event is removed. */
static void paiext_root_free(void)
{
	if (refcount_dec_and_test(&paiext_root.refcnt)) {
		free_percpu(paiext_root.mapptr);
		paiext_root.mapptr = NULL;
	}
}
/* On initialization of first event also allocate per CPU data dynamically.
 * Start with an array of pointers, the array size is the maximum number of
 * CPUs possible, which might be larger than the number of CPUs currently
 * online.
 */
static int paiext_root_alloc(void)
{
	if (!refcount_inc_not_zero(&paiext_root.refcnt)) {
		/* The memory is already zeroed. */
		paiext_root.mapptr = alloc_percpu(struct paiext_mapptr);
		if (!paiext_root.mapptr) {
			/* Returning without refcnt adjustment is ok. The
			 * error code is handled by paiext_alloc() which
			 * decrements refcnt when an event can not be
			 * created.
			 */
			return -ENOMEM;
		}
		refcount_set(&paiext_root.refcnt, 1);
	}
	return 0;
}
/* Protects against concurrent increments of the sampler and counter
 * members and prohibits concurrent execution of counting and sampling
 * events.
 * Ensures that the analytics counter block is deallocated only when the
 * number of sampling and counting events on that CPU is zero.
 * For details see paiext_alloc().
 */
static DEFINE_MUTEX(paiext_reserve_mutex);
/* Free all memory allocated for event counting/sampling setup */
static void paiext_free(struct paiext_mapptr *mp)
{
	kfree(mp->mapptr->area);
	kfree(mp->mapptr->paiext_cb);
	kvfree(mp->mapptr->save);
	kfree(mp->mapptr);
	mp->mapptr = NULL;
}
/* Release the PMU if event is the last perf event */
static void paiext_event_destroy(struct perf_event *event)
{
	struct paiext_mapptr *mp = per_cpu_ptr(paiext_root.mapptr, event->cpu);
	struct paiext_map *cpump = mp->mapptr;

	free_page(PAI_SAVE_AREA(event));
	mutex_lock(&paiext_reserve_mutex);
	if (refcount_dec_and_test(&cpump->refcnt))	/* Last reference gone */
		paiext_free(mp);
	paiext_root_free();
	mutex_unlock(&paiext_reserve_mutex);
	debug_sprintf_event(paiext_dbg, 4, "%s cpu %d mapptr %p\n", __func__,
			    event->cpu, mp->mapptr);
}
/* Used to avoid races in checking concurrent access of counting and
 * sampling for pai_extension events.
 *
 * Only one instance of event pai_ext/NNPA_ALL/ for sampling is
 * allowed and when this event is running, no counting event is allowed.
 * Several counting events are allowed in parallel, but no sampling event
 * is allowed while one (or more) counting events are running.
 *
 * This function is called in process context and it is safe to block.
 * When the event initialization function fails, no other call back will
 * be invoked.
 *
 * Allocate the memory for the event.
 */
static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event)
{
	struct paiext_mapptr *mp;
	struct paiext_map *cpump;
	int rc;

	mutex_lock(&paiext_reserve_mutex);
	rc = paiext_root_alloc();
	if (rc)
		goto unlock;

	mp = per_cpu_ptr(paiext_root.mapptr, event->cpu);
	cpump = mp->mapptr;
	if (!cpump) {			/* Paiext_map allocated? */
		rc = -ENOMEM;
		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
		if (!cpump)
			goto undo;
		/* Allocate memory for counter area and counter extraction.
		 * These are
		 * - a 512 byte block and requires 512 byte boundary alignment.
		 * - a 1KB byte block and requires 1KB boundary alignment.
		 * Only the first counting event has to allocate the area.
		 *
		 * Note: This works with commit 59bb47985c1d by default.
		 * Backporting this to kernels without this commit might
		 * need adjustment.
		 */
		mp->mapptr = cpump;
		cpump->area = kzalloc(PAIE1_CTRBLOCK_SZ, GFP_KERNEL);
		cpump->paiext_cb = kzalloc(PAIE1_CB_SZ, GFP_KERNEL);
		cpump->save = kvmalloc_array(paiext_cnt + 1,
					     sizeof(struct pai_userdata),
					     GFP_KERNEL);
		if (!cpump->save || !cpump->area || !cpump->paiext_cb) {
			paiext_free(mp);
			goto undo;
		}
		refcount_set(&cpump->refcnt, 1);
		cpump->mode = a->sample_period ? PAI_MODE_SAMPLING
					       : PAI_MODE_COUNTING;
	} else {
		/* Multiple invocation, check what is active.
		 * Supported are multiple counter events or only one sampling
		 * event concurrently at any one time.
		 */
		if (cpump->mode == PAI_MODE_SAMPLING ||
		    (cpump->mode == PAI_MODE_COUNTING && a->sample_period)) {
			rc = -EBUSY;
			goto undo;
		}
		refcount_inc(&cpump->refcnt);
	}
	rc = 0;

undo:
	if (rc) {
		/* Error in allocation of event, decrement anchor. Since
		 * the event is not created, its destroy() function is never
		 * invoked. Adjust the reference counter for the anchor.
		 */
		paiext_root_free();
	}
unlock:
	mutex_unlock(&paiext_reserve_mutex);
	/* If rc is non-zero, no increment of counter/sampler was done. */
	return rc;
}
/* The PAI extension 1 control block supports up to 128 entries. Return
 * the index within PAIE1_CB given the event number. Also validate event
 * number.
 */
static int paiext_event_valid(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	if (cfg >= PAI_NNPA_BASE && cfg <= PAI_NNPA_BASE + paiext_cnt) {
		/* Offset NNPA in paiext_cb */
		event->hw.config_base = offsetof(struct paiext_cb, acc);
		return 0;
	}
	return -EINVAL;
}
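/* Note: the event number (attr.config) is PAI_NNPA_BASE for NNPA_ALL and
 * PAI_NNPA_BASE + n for the n-th individual NNPA counter, matching the
 * sysfs event attribute numbering created in attr_event_init_one().
 */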
/* Might be called on different CPU than the one the event is intended for. */
static int paiext_event_init(struct perf_event *event)
{
	struct perf_event_attr *a = &event->attr;
	int rc;

	/* PMU pai_ext registered as PERF_TYPE_RAW, check event type */
	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
		return -ENOENT;
	/* PAI extension event must be valid and in supported range */
	rc = paiext_event_valid(event);
	if (rc)
		return rc;
	/* Allow only CPU wide operation, no process context for now. */
	if ((event->attach_state & PERF_ATTACH_TASK) || event->cpu == -1)
		return -ENOENT;
	/* Allow only event NNPA_ALL for sampling. */
	if (a->sample_period && a->config != PAI_NNPA_BASE)
		return -EINVAL;
	/* Prohibit exclude_user event selection */
	if (a->exclude_user)
		return -EINVAL;
	/* Get a page to store last counter values for sampling */
	if (a->sample_period) {
		PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
		if (!PAI_SAVE_AREA(event))
			return -ENOMEM;
	}

	rc = paiext_alloc(a, event);
	if (rc) {
		free_page(PAI_SAVE_AREA(event));
		return rc;
	}
	event->destroy = paiext_event_destroy;

	if (a->sample_period) {
		a->sample_period = 1;
		a->freq = 0;
		/* Register for paiext_sched_task() to be called */
		event->attach_state |= PERF_ATTACH_SCHED_CB;
		/* Add raw data which are the memory mapped counters */
		a->sample_type |= PERF_SAMPLE_RAW;
		/* Turn off inheritance */
		a->inherit = 0;
	}

	return 0;
}
static u64 paiext_getctr(unsigned long *area, int nr)
{
	return area[nr];
}

/* Read the counter values. Return value from location in buffer. For event
 * NNPA_ALL sum up all events.
 */
static u64 paiext_getdata(struct perf_event *event)
{
	struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
	struct paiext_map *cpump = mp->mapptr;
	u64 sum = 0;
	int i;

	if (event->attr.config != PAI_NNPA_BASE)
		return paiext_getctr(cpump->area,
				     event->attr.config - PAI_NNPA_BASE);

	for (i = 1; i <= paiext_cnt; i++)
		sum += paiext_getctr(cpump->area, i);
	return sum;
}
static u64 paiext_getall(struct perf_event *event)
{
	return paiext_getdata(event);
}
static void paiext_read(struct perf_event *event)
{
	u64 prev, new, delta;

	prev = local64_read(&event->hw.prev_count);
	new = paiext_getall(event);
	local64_set(&event->hw.prev_count, new);
	delta = (prev <= new) ? new - prev : (-1ULL - prev) + new + 1;
	local64_add(delta, &event->count);
}
static void paiext_start(struct perf_event *event, int flags)
{
	struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
	struct paiext_map *cpump = mp->mapptr;
	u64 sum;

	if (!event->attr.sample_period) {	/* Counting */
		sum = paiext_getall(event);	/* Get current value */
		local64_set(&event->hw.prev_count, sum);
	} else {				/* Sampling */
		cpump->event = event;
		perf_sched_cb_inc(event->pmu);
	}
}
static int paiext_add(struct perf_event *event, int flags)
{
	struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
	struct paiext_map *cpump = mp->mapptr;
	struct paiext_cb *pcb = cpump->paiext_cb;

	if (++cpump->active_events == 1) {
		S390_lowcore.aicd = virt_to_phys(cpump->paiext_cb);
		pcb->acc = virt_to_phys(cpump->area) | 0x1;
		/* Enable CPU instruction lookup for PAIE1 control block */
		local_ctl_set_bit(0, CR0_PAI_EXTENSION_BIT);
		debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
				    __func__, S390_lowcore.aicd, pcb->acc);
	}
	if (flags & PERF_EF_START)
		paiext_start(event, PERF_EF_RELOAD);
	event->hw.state = 0;
	return 0;
}
static void paiext_stop(struct perf_event *event, int flags)
{
	if (!event->attr.sample_period)	/* Counting */
		paiext_read(event);
	else				/* Sampling */
		perf_sched_cb_dec(event->pmu);
	event->hw.state = PERF_HES_STOPPED;
}
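/* paiext_del() reverses the hardware setup done in paiext_add(): when the
 * last active event on this CPU is removed, the CR0 PAI extension bit is
 * cleared and the lowcore pointer to the PAIE1 control block is reset.
 */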
static void paiext_del(struct perf_event *event, int flags)
{
	struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
	struct paiext_map *cpump = mp->mapptr;
	struct paiext_cb *pcb = cpump->paiext_cb;

	paiext_stop(event, PERF_EF_UPDATE);
	if (--cpump->active_events == 0) {
		/* Disable CPU instruction lookup for PAIE1 control block */
		local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT);
		pcb->acc = 0;
		S390_lowcore.aicd = 0;
		debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
				    __func__, S390_lowcore.aicd, pcb->acc);
	}
}
/* Create raw data and save it in buffer. Returns number of bytes copied.
 * Saves only positive counter entries of the form
 * 2 bytes: Number of counter
 * 8 bytes: Value of counter
 */
static size_t paiext_copy(struct pai_userdata *userdata, unsigned long *area,
			  unsigned long *area_old)
{
	int i, outidx = 0;

	for (i = 1; i <= paiext_cnt; i++) {
		u64 val = paiext_getctr(area, i);
		u64 val_old = paiext_getctr(area_old, i);

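		/* Compute the increment since the last read. The counters
		 * are unsigned 64 bit values, so a smaller new value means
		 * the counter wrapped around: e.g. with val_old == ~0ULL - 1
		 * and val == 3 the resulting delta is 5.
		 */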
		if (val >= val_old)
			val -= val_old;
		else
			val = (~0ULL - val_old) + val + 1;
		if (val) {
			userdata[outidx].num = i;
			userdata[outidx].value = val;
			outidx++;
		}
	}
	return outidx * sizeof(*userdata);
}
/* Write sample when one or more counter values are nonzero.
 *
 * Note: The functions paiext_sched_task() and paiext_push_sample() are not
 * invoked after function paiext_del() has been called because of function
 * perf_sched_cb_dec().
 * The functions paiext_sched_task() and paiext_push_sample() are only
 * called when sampling is active. Function perf_sched_cb_inc()
 * has been invoked to install function paiext_sched_task() as call back
 * to run at context switch time (see paiext_start()).
 *
 * This causes function perf_event_context_sched_out() and
 * perf_event_context_sched_in() to check whether the PMU has installed a
 * sched_task() callback. That callback is not active after paiext_del()
 * returns and has deleted the event on that CPU.
 */
static int paiext_push_sample(size_t rawsize, struct paiext_map *cpump,
			      struct perf_event *event)
{
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	int overflow;

	/* Setup perf sample */
	memset(&regs, 0, sizeof(regs));
	memset(&raw, 0, sizeof(raw));
	memset(&data, 0, sizeof(data));
	perf_sample_data_init(&data, 0, event->hw.last_period);
	if (event->attr.sample_type & PERF_SAMPLE_TID) {
		data.tid_entry.pid = task_tgid_nr(current);
		data.tid_entry.tid = task_pid_nr(current);
	}
	if (event->attr.sample_type & PERF_SAMPLE_TIME)
		data.time = event->clock();
	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
		data.id = event->id;
	if (event->attr.sample_type & PERF_SAMPLE_CPU)
		data.cpu_entry.cpu = smp_processor_id();
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.frag.size = rawsize;
		raw.frag.data = cpump->save;
		perf_sample_save_raw_data(&data, &raw);
	}

	overflow = perf_event_overflow(event, &data, &regs);
	perf_event_update_userpage(event);
	/* Save NNPA lowcore area after read in event */
	memcpy((void *)PAI_SAVE_AREA(event), cpump->area,
	       PAIE1_CTRBLOCK_SZ);
	return overflow;
}
/* Check if there is data to be saved on schedule out of a task. */
static int paiext_have_sample(void)
{
	struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
	struct paiext_map *cpump = mp->mapptr;
	struct perf_event *event = cpump->event;
	size_t rawsize;
	int rc = 0;

	if (!event)
		return rc;
	rawsize = paiext_copy(cpump->save, cpump->area,
			      (unsigned long *)PAI_SAVE_AREA(event));
	if (rawsize)			/* Incremented counters */
		rc = paiext_push_sample(rawsize, cpump, event);
	return rc;
}
/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event NNPA_ALL is allowed.
 */
static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{
	/* We started with a clean page on event installation. So read out
	 * results on schedule_out and if page was dirty, clear values.
	 */
	if (!sched_in)
		paiext_have_sample();
}
/* Attribute definitions for PAI extension 1 interface. As with other CPU
 * Measurement Facilities, there is one attribute per mapped counter.
 * The number of mapped counters may vary per machine generation. Use
 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instruction returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported, there are no
 * holes. A returned number of zero means no support for mapped counters.
 *
 * The identification of the counter is a unique number. The chosen range
 * is 0x1800 + offset in mapped kernel page.
 * All CPU Measurement Facility counter identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0x1000 to 0x103e are used for PAI cryptography
 * counters.
 * Numbers 0xb0000, 0xbc000 and 0xbd000 are already
 * used for the CPU Measurement Sampling facility.
 */
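/* Illustrative usage (not part of this file; exact syntax depends on the
 * installed perf tool version): counting all NNPA operations on CPU 0 could
 * look like
 *	perf stat -e pai_ext/NNPA_ALL/ -C 0 -- sleep 1
 * while sampling uses the same event with a sample period, for example
 *	perf record -e pai_ext/NNPA_ALL/ -C 0 -- sleep 1
 */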
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *paiext_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group paiext_events_group = {
	.name = "events",
	.attrs = NULL,			/* Filled in attr_event_init() */
};

static struct attribute_group paiext_format_group = {
	.name = "format",
	.attrs = paiext_format_attr,
};

static const struct attribute_group *paiext_attr_groups[] = {
	&paiext_events_group,
	&paiext_format_group,
	NULL,
};
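/* With the attribute groups above, perf_pmu_register() typically exposes
 * this PMU as /sys/bus/event_source/devices/pai_ext/ with an events/
 * directory (one file per counter name) and a format/ directory describing
 * the config field. This describes the generic perf sysfs layout and is
 * not set up explicitly in this file.
 */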
/* Performance monitoring unit for mapped counters */
static struct pmu paiext = {
	.task_ctx_nr  = perf_invalid_context,
	.event_init   = paiext_event_init,
	.add	      = paiext_add,
	.del	      = paiext_del,
	.start	      = paiext_start,
	.stop	      = paiext_stop,
	.read	      = paiext_read,
	.sched_task   = paiext_sched_task,
	.attr_groups  = paiext_attr_groups,
};
/* List of symbolic PAI extension 1 NNPA counter names. */
static const char * const paiext_ctrnames[] = {
	[0] = "NNPA_ALL",
	[1] = "NNPA_ADD",
	[2] = "NNPA_SUB",
	[3] = "NNPA_MUL",
	[4] = "NNPA_DIV",
	[5] = "NNPA_MIN",
	[6] = "NNPA_MAX",
	[7] = "NNPA_LOG",
	[8] = "NNPA_EXP",
	[9] = "NNPA_IBM_RESERVED_9",
	[10] = "NNPA_RELU",
	[11] = "NNPA_TANH",
	[12] = "NNPA_SIGMOID",
	[13] = "NNPA_SOFTMAX",
	[14] = "NNPA_BATCHNORM",
	[15] = "NNPA_MAXPOOL2D",
	[16] = "NNPA_AVGPOOL2D",
	[17] = "NNPA_LSTMACT",
	[18] = "NNPA_GRUACT",
	[19] = "NNPA_CONVOLUTION",
	[20] = "NNPA_MATMUL_OP",
	[21] = "NNPA_MATMUL_OP_BCAST23",
	[22] = "NNPA_SMALLBATCH",
	[23] = "NNPA_LARGEDIM",
	[24] = "NNPA_SMALLTENSOR",
	[25] = "NNPA_1MFRAME",
	[26] = "NNPA_2GFRAME",
	[27] = "NNPA_ACCESSEXCEPT",
};
static void __init attr_event_free(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;
	struct device_attribute *dap;
	int i;

	for (i = 0; i < num; i++) {
		dap = container_of(attrs[i], struct device_attribute, attr);
		pa = container_of(dap, struct perf_pmu_events_attr, attr);
		kfree(pa);
	}
	kfree(attrs);
}
static int __init attr_event_init_one(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;

	/* Index larger than array_size, no counter name available */
	if (num >= ARRAY_SIZE(paiext_ctrnames)) {
		attrs[num] = NULL;
		return 0;
	}

	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
	if (!pa)
		return -ENOMEM;

	sysfs_attr_init(&pa->attr.attr);
	pa->id = PAI_NNPA_BASE + num;
	pa->attr.attr.name = paiext_ctrnames[num];
	pa->attr.attr.mode = 0444;
	pa->attr.show = cpumf_events_sysfs_show;
	pa->attr.store = NULL;
	attrs[num] = &pa->attr.attr;
	return 0;
}
/* Create PMU sysfs event attributes on the fly. */
static int __init attr_event_init(void)
{
	struct attribute **attrs;
	int ret, i;

	attrs = kmalloc_array(paiext_cnt + 2, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;
	for (i = 0; i <= paiext_cnt; i++) {
		ret = attr_event_init_one(attrs, i);
		if (ret) {
			attr_event_free(attrs, i);
			return ret;
		}
	}
	attrs[i] = NULL;
	paiext_events_group.attrs = attrs;
	return 0;
}
static int __init paiext_init(void)
{
	struct qpaci_info_block ib;
	int rc;

	if (!test_facility(197))
		return 0;

	qpaci(&ib);
	paiext_cnt = ib.num_nnpa;
	if (paiext_cnt >= PAI_NNPA_MAXCTR)
		paiext_cnt = PAI_NNPA_MAXCTR;
	if (!paiext_cnt)
		return 0;

	rc = attr_event_init();
	if (rc) {
		pr_err("Creation of PMU " KMSG_COMPONENT " /sysfs failed\n");
		return rc;
	}

	/* Setup s390dbf facility */
	paiext_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
	if (!paiext_dbg) {
		pr_err("Registration of s390dbf " KMSG_COMPONENT " failed\n");
		rc = -ENOMEM;
		goto out_attr;
	}
	debug_register_view(paiext_dbg, &debug_sprintf_view);

	rc = perf_pmu_register(&paiext, KMSG_COMPONENT, -1);
	if (rc) {
		pr_err("Registration of " KMSG_COMPONENT " PMU failed with "
		       "rc=%i\n", rc);
		goto out_debug;
	}
	return 0;

out_debug:
	debug_unregister_view(paiext_dbg, &debug_sprintf_view);
	debug_unregister(paiext_dbg);
out_attr:
	attr_event_free(paiext_events_group.attrs,
			ARRAY_SIZE(paiext_ctrnames) + 1);
	return rc;
}

device_initcall(paiext_init);