// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support - Processor Activity Instrumentation Facility
 *
 * Copyright IBM Corp. 2022
 */

#define KMSG_COMPONENT	"pai_crypto"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <asm/ctlreg.h>
#include <asm/pai.h>
#include <asm/debug.h>

static debug_info_t *cfm_dbg;
static unsigned int paicrypt_cnt;	/* Size of the mapped counter sets */
					/* extracted with QPACI instruction */
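
/* Static key, incremented when a PAI crypto event is created and
 * decremented when it is destroyed (see paicrypt_event_init() and
 * paicrypt_event_destroy()).
 */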
DEFINE_STATIC_KEY_FALSE(pai_key);

struct paicrypt_map {
	unsigned long *page;		/* Page for CPU to store counters */
	struct pai_userdata *save;	/* Page to store non-zero counters */
	unsigned int active_events;	/* # of PAI crypto users */
	refcount_t refcnt;		/* Reference count mapped buffers */
	enum paievt_mode mode;		/* Type of event */
	struct perf_event *event;	/* Perf event for sampling */
};

struct paicrypt_mapptr {
	struct paicrypt_map *mapptr;
};

static struct paicrypt_root {		/* Anchor to per CPU data */
	refcount_t refcnt;		/* Overall active events */
	struct paicrypt_mapptr __percpu *mapptr;
} paicrypt_root;

/* Free per CPU data when the last event is removed. */
static void paicrypt_root_free(void)
{
	if (refcount_dec_and_test(&paicrypt_root.refcnt)) {
		free_percpu(paicrypt_root.mapptr);
		paicrypt_root.mapptr = NULL;
	}
	debug_sprintf_event(cfm_dbg, 5, "%s root.refcount %d\n", __func__,
			    refcount_read(&paicrypt_root.refcnt));
}

/*
 * On initialization of first event also allocate per CPU data dynamically.
 * Start with an array of pointers, the array size is the maximum number of
 * CPUs possible, which might be larger than the number of CPUs currently
 * online.
 */
static int paicrypt_root_alloc(void)
{
	if (!refcount_inc_not_zero(&paicrypt_root.refcnt)) {
		/* The memory is already zeroed. */
		paicrypt_root.mapptr = alloc_percpu(struct paicrypt_mapptr);
		if (!paicrypt_root.mapptr)
			return -ENOMEM;
		refcount_set(&paicrypt_root.refcnt, 1);
	}
	return 0;
}

/* Release the PMU if event is the last perf event */
static DEFINE_MUTEX(pai_reserve_mutex);

/* Adjust usage counters and remove allocated memory when all users are
 * gone.
 */
static void paicrypt_event_destroy(struct perf_event *event)
{
	struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr,
						 event->cpu);
	struct paicrypt_map *cpump = mp->mapptr;

	static_branch_dec(&pai_key);
	mutex_lock(&pai_reserve_mutex);
	debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d"
			    " mode %d refcnt %u\n", __func__,
			    event->attr.config, event->cpu,
			    cpump->active_events, cpump->mode,
			    refcount_read(&cpump->refcnt));
	free_page(PAI_SAVE_AREA(event));
	if (refcount_dec_and_test(&cpump->refcnt)) {
		debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
				    __func__, (unsigned long)cpump->page,
				    cpump->save);
		free_page((unsigned long)cpump->page);
		kvfree(cpump->save);
		kfree(cpump);
		mp->mapptr = NULL;
	}
	paicrypt_root_free();
	mutex_unlock(&pai_reserve_mutex);
}

static u64 paicrypt_getctr(unsigned long *page, int nr, bool kernel)
{
	if (kernel)
		nr += PAI_CRYPTO_MAXCTR;
	return page[nr];
}

/* Read the counter values. Return the value from its location in the
 * mapped counter page. For event CRYPTO_ALL sum up all counters.
 */
static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	u64 sum = 0;
	int i;

	if (event->attr.config != PAI_CRYPTO_BASE) {
		return paicrypt_getctr(cpump->page,
				       event->attr.config - PAI_CRYPTO_BASE,
				       kernel);
	}

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = paicrypt_getctr(cpump->page, i, kernel);

		sum += val;
	}
	return sum;
}
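
/* Sum the kernel and/or user space counter values depending on the
 * exclude_kernel and exclude_user attributes of the event.
 */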
static u64 paicrypt_getall(struct perf_event *event)
{
	u64 sum = 0;

	if (!event->attr.exclude_kernel)
		sum += paicrypt_getdata(event, true);
	if (!event->attr.exclude_user)
		sum += paicrypt_getdata(event, false);

	return sum;
}

/* Used to avoid races in checking concurrent access of counting and
 * sampling for crypto events.
 *
 * Only one instance of event pai_crypto/CRYPTO_ALL/ for sampling is
 * allowed and when this event is running, no counting event is allowed.
 * Several counting events are allowed in parallel, but no sampling event
 * is allowed while one (or more) counting events are running.
 *
 * This function is called in process context and it is safe to block.
 * When the event initialization function fails, no other call back will
 * be called.
 *
 * Allocate the memory for the event.
 */
static struct paicrypt_map *paicrypt_busy(struct perf_event *event)
{
	struct perf_event_attr *a = &event->attr;
	struct paicrypt_map *cpump = NULL;
	struct paicrypt_mapptr *mp;
	int rc;

	mutex_lock(&pai_reserve_mutex);

	/* Allocate root node */
	rc = paicrypt_root_alloc();
	if (rc)
		goto unlock;

	/* Allocate node for this event */
	mp = per_cpu_ptr(paicrypt_root.mapptr, event->cpu);
	cpump = mp->mapptr;
	if (!cpump) {			/* Paicrypt_map allocated? */
		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
		if (!cpump) {
			rc = -ENOMEM;
			goto free_root;
		}
	}

	if (a->sample_period) {		/* Sampling requested */
		if (cpump->mode != PAI_MODE_NONE)
			rc = -EBUSY;	/* ... sampling/counting active */
	} else {			/* Counting requested */
		if (cpump->mode == PAI_MODE_SAMPLING)
			rc = -EBUSY;	/* ... and sampling active */
	}
	/*
	 * This error case triggers when there is a conflict:
	 * Either sampling requested and counting already active, or vice
	 * versa. Therefore the struct paicrypt_map for this CPU is
	 * needed or the error could not have occurred. Only adjust root
	 * node refcount.
	 */
	if (rc)
		goto free_root;

	/* Allocate memory for counter page and counter extraction.
	 * Only the first counting event has to allocate a page.
	 */
	if (cpump->page) {
		refcount_inc(&cpump->refcnt);
		goto unlock;
	}

	rc = -ENOMEM;
	cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
	if (!cpump->page)
		goto free_paicrypt_map;
	cpump->save = kvmalloc_array(paicrypt_cnt + 1,
				     sizeof(struct pai_userdata), GFP_KERNEL);
	if (!cpump->save) {
		free_page((unsigned long)cpump->page);
		cpump->page = NULL;
		goto free_paicrypt_map;
	}

	/* Set mode and reference count */
	rc = 0;
	refcount_set(&cpump->refcnt, 1);
	cpump->mode = a->sample_period ? PAI_MODE_SAMPLING : PAI_MODE_COUNTING;
	mp->mapptr = cpump;
	debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx users %d"
			    " mode %d refcnt %u page %#lx save %p rc %d\n",
			    __func__, a->sample_period, cpump->active_events,
			    cpump->mode, refcount_read(&cpump->refcnt),
			    (unsigned long)cpump->page, cpump->save, rc);
	goto unlock;

free_paicrypt_map:
	/* Undo kzalloc() from above */
	kfree(cpump);
	mp->mapptr = NULL;
free_root:
	paicrypt_root_free();
unlock:
	mutex_unlock(&pai_reserve_mutex);
	return rc ? ERR_PTR(rc) : cpump;
}

/* Might be called on different CPU than the one the event is intended for. */
static int paicrypt_event_init(struct perf_event *event)
{
	struct perf_event_attr *a = &event->attr;
	struct paicrypt_map *cpump;
	int rc = 0;

	/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
		return -ENOENT;
	/* PAI crypto event must be in valid range */
	if (a->config < PAI_CRYPTO_BASE ||
	    a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
		return -EINVAL;
	/* Allow only CPU wide operation, no process context for now. */
	if ((event->attach_state & PERF_ATTACH_TASK) || event->cpu == -1)
		return -ENOENT;
	/* Allow only CRYPTO_ALL for sampling. */
	if (a->sample_period && a->config != PAI_CRYPTO_BASE)
		return -EINVAL;
	/* Get a page to store last counter values for sampling */
	if (a->sample_period) {
		PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
		if (!PAI_SAVE_AREA(event)) {
			rc = -ENOMEM;
			goto out;
		}
	}

	cpump = paicrypt_busy(event);
	if (IS_ERR(cpump)) {
		free_page(PAI_SAVE_AREA(event));
		rc = PTR_ERR(cpump);
		goto out;
	}

	event->destroy = paicrypt_event_destroy;

	if (a->sample_period) {
		a->sample_period = 1;
		a->freq = 0;
		/* Register for paicrypt_sched_task() to be called */
		event->attach_state |= PERF_ATTACH_SCHED_CB;
		/* Add raw data which contain the memory mapped counters */
		a->sample_type |= PERF_SAMPLE_RAW;
		/* Turn off inheritance */
		a->inherit = 0;
	}

	static_branch_inc(&pai_key);
out:
	return rc;
}
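
/* Update the event count: read the current counter value, remember it as
 * the new reference and add the (wrap-around safe) delta to the count.
 */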
static void paicrypt_read(struct perf_event *event)
{
	u64 prev, new, delta;

	prev = local64_read(&event->hw.prev_count);
	new = paicrypt_getall(event);
	local64_set(&event->hw.prev_count, new);
	delta = (prev <= new) ? new - prev
			      : (-1ULL - prev) + new + 1;	/* overflow */
	local64_add(delta, &event->count);
}
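
/* Start the event. A counting event saves the current counter value as
 * reference; a sampling event is remembered per CPU so the context switch
 * callback can create samples for it.
 */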
static void paicrypt_start(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	u64 sum;

	if (!event->attr.sample_period) {	/* Counting */
		sum = paicrypt_getall(event);	/* Get current value */
		local64_set(&event->hw.prev_count, sum);
	} else {				/* Sampling */
		cpump->event = event;
		perf_sched_cb_inc(event->pmu);
	}
}
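
/* Add the event on this CPU. The first active event installs the counter
 * page in the lowcore crypto counter designation and enables counting via
 * control register 0.
 */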
static int paicrypt_add(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	unsigned long ccd;

	if (++cpump->active_events == 1) {
		ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
		WRITE_ONCE(S390_lowcore.ccd, ccd);
		local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
	}
	if (flags & PERF_EF_START)
		paicrypt_start(event, PERF_EF_RELOAD);
	event->hw.state = 0;
	return 0;
}
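
/* Stop the event. A counting event accumulates the final delta; a sampling
 * event disables the context switch callback.
 */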
static void paicrypt_stop(struct perf_event *event, int flags)
{
	if (!event->attr.sample_period)		/* Counting */
		paicrypt_read(event);
	else					/* Sampling */
		perf_sched_cb_dec(event->pmu);
	event->hw.state = PERF_HES_STOPPED;
}
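
/* Remove the event from this CPU. The last active event disables counting
 * and clears the lowcore crypto counter designation.
 */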
static void paicrypt_del(struct perf_event *event, int flags)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;

	paicrypt_stop(event, PERF_EF_UPDATE);
	if (--cpump->active_events == 0) {
		local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
		WRITE_ONCE(S390_lowcore.ccd, 0);
	}
}

/* Create raw data and save it in buffer. Calculate the delta for each
 * counter between this invocation and the last invocation.
 * Returns number of bytes copied.
 * Saves only entries with positive counter difference of the form
 * 2 bytes: Number of counter
 * 8 bytes: Value of counter
 */
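/* For example, if only counter 20 (KMC_ENCRYPTED_DEA) advanced by 3 since
 * the last read, the raw data consists of a single 10-byte entry with
 * num = 20 and value = 3.
 */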
static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page,
			    unsigned long *page_old, bool exclude_user,
			    bool exclude_kernel)
{
	int i, outidx = 0;

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = 0, val_old = 0;

		if (!exclude_kernel) {
			val += paicrypt_getctr(page, i, true);
			val_old += paicrypt_getctr(page_old, i, true);
		}
		if (!exclude_user) {
			val += paicrypt_getctr(page, i, false);
			val_old += paicrypt_getctr(page_old, i, false);
		}
		if (val >= val_old)
			val -= val_old;
		else
			val = (~0ULL - val_old) + val + 1;
		if (val) {
			userdata[outidx].num = i;
			userdata[outidx].value = val;
			outidx++;
		}
	}
	return outidx * sizeof(struct pai_userdata);
}
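
/* Build a perf sample from the non-zero counter deltas saved in
 * cpump->save and push it to the sampling event. Afterwards remember the
 * current counter page contents as new reference.
 */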
static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump,
				struct perf_event *event)
{
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	int overflow;

	/* Setup perf sample */
	memset(&regs, 0, sizeof(regs));
	memset(&raw, 0, sizeof(raw));
	memset(&data, 0, sizeof(data));
	perf_sample_data_init(&data, 0, event->hw.last_period);
	if (event->attr.sample_type & PERF_SAMPLE_TID) {
		data.tid_entry.pid = task_tgid_nr(current);
		data.tid_entry.tid = task_pid_nr(current);
	}
	if (event->attr.sample_type & PERF_SAMPLE_TIME)
		data.time = event->clock();
	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
		data.id = event->id;
	if (event->attr.sample_type & PERF_SAMPLE_CPU) {
		data.cpu_entry.cpu = smp_processor_id();
		data.cpu_entry.reserved = 0;
	}
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.frag.size = rawsize;
		raw.frag.data = cpump->save;
		perf_sample_save_raw_data(&data, &raw);
	}

	overflow = perf_event_overflow(event, &data, &regs);
	perf_event_update_userpage(event);
	/* Save crypto counter lowcore page after reading event data. */
	memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE);
	return overflow;
}

/* Check if there is data to be saved on schedule out of a task. */
static int paicrypt_have_sample(void)
{
	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
	struct paicrypt_map *cpump = mp->mapptr;
	struct perf_event *event = cpump->event;
	size_t rawsize;
	int rc = 0;

	if (!event)			/* No event active */
		return 0;
	rawsize = paicrypt_copy(cpump->save, cpump->page,
				(unsigned long *)PAI_SAVE_AREA(event),
				cpump->event->attr.exclude_user,
				cpump->event->attr.exclude_kernel);
	if (rawsize)			/* Any counters incremented? */
		rc = paicrypt_push_sample(rawsize, cpump, event);
	return rc;
}

/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event CRYPTO_ALL is allowed.
 */
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{
	/* We started with a clean page on event installation. So read out
	 * results on schedule_out and if page was dirty, clear values.
	 */
	if (!sched_in)
		paicrypt_have_sample();
}

/* Attribute definitions for paicrypt interface. As with other CPU
 * Measurement Facilities, there is one attribute per mapped counter.
 * The number of mapped counters may vary per machine generation. Use
 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instruction returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported, there are no
 * holes. A returned number of zero means no support for mapped counters.
 *
 * The identification of the counter is a unique number. The chosen range
 * is 0x1000 + offset in mapped kernel page.
 * All CPU Measurement Facility counter identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
 * used for the CPU Measurement Sampling facility.
 */
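/* For example, with PAI_CRYPTO_BASE 0x1000 the CRYPTO_ALL event has
 * identifier 0x1000 and the counter at index 4 below (KM_ENCRYPTED_DEA)
 * has identifier 0x1004. The names are exported through sysfs, so a
 * counting event can be requested as e.g.
 * "perf stat -a -e pai_crypto/KM_ENCRYPTED_DEA/".
 */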

PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *paicrypt_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group paicrypt_events_group = {
	.name = "events",
	.attrs = NULL			/* Filled in attr_event_init() */
};

static struct attribute_group paicrypt_format_group = {
	.name = "format",
	.attrs = paicrypt_format_attr,
};

static const struct attribute_group *paicrypt_attr_groups[] = {
	&paicrypt_events_group,
	&paicrypt_format_group,
	NULL,
};

/* Performance monitoring unit for mapped counters */
static struct pmu paicrypt = {
	.task_ctx_nr = perf_invalid_context,
	.event_init = paicrypt_event_init,
	.add = paicrypt_add,
	.del = paicrypt_del,
	.start = paicrypt_start,
	.stop = paicrypt_stop,
	.read = paicrypt_read,
	.sched_task = paicrypt_sched_task,
	.attr_groups = paicrypt_attr_groups
};

/* List of symbolic PAI counter names. */
static const char * const paicrypt_ctrnames[] = {
	[4] = "KM_ENCRYPTED_DEA",
	[5] = "KM_ENCRYPTED_TDEA_128",
	[6] = "KM_ENCRYPTED_TDEA_192",
	[10] = "KM_ENCRYPTED_AES_128",
	[11] = "KM_ENCRYPTED_AES_192",
	[12] = "KM_ENCRYPTED_AES_256",
	[13] = "KM_XTS_AES_128",
	[14] = "KM_XTS_AES_256",
	[15] = "KM_XTS_ENCRYPTED_AES_128",
	[16] = "KM_XTS_ENCRYPTED_AES_256",
	[18] = "KMC_TDEA_128",
	[19] = "KMC_TDEA_192",
	[20] = "KMC_ENCRYPTED_DEA",
	[21] = "KMC_ENCRYPTED_TDEA_128",
	[22] = "KMC_ENCRYPTED_TDEA_192",
	[23] = "KMC_AES_128",
	[24] = "KMC_AES_192",
	[25] = "KMC_AES_256",
	[26] = "KMC_ENCRYPTED_AES_128",
	[27] = "KMC_ENCRYPTED_AES_192",
	[28] = "KMC_ENCRYPTED_AES_256",
	[30] = "KMA_GCM_AES_128",
	[31] = "KMA_GCM_AES_192",
	[32] = "KMA_GCM_AES_256",
	[33] = "KMA_GCM_ENCRYPTED_AES_128",
	[34] = "KMA_GCM_ENCRYPTED_AES_192",
	[35] = "KMA_GCM_ENCRYPTED_AES_256",
	[37] = "KMF_TDEA_128",
	[38] = "KMF_TDEA_192",
	[39] = "KMF_ENCRYPTED_DEA",
	[40] = "KMF_ENCRYPTED_TDEA_128",
	[41] = "KMF_ENCRYPTED_TDEA_192",
	[42] = "KMF_AES_128",
	[43] = "KMF_AES_192",
	[44] = "KMF_AES_256",
	[45] = "KMF_ENCRYPTED_AES_128",
	[46] = "KMF_ENCRYPTED_AES_192",
	[47] = "KMF_ENCRYPTED_AES_256",
	[49] = "KMCTR_TDEA_128",
	[50] = "KMCTR_TDEA_192",
	[51] = "KMCTR_ENCRYPTED_DEA",
	[52] = "KMCTR_ENCRYPTED_TDEA_128",
	[53] = "KMCTR_ENCRYPTED_TDEA_192",
	[54] = "KMCTR_AES_128",
	[55] = "KMCTR_AES_192",
	[56] = "KMCTR_AES_256",
	[57] = "KMCTR_ENCRYPTED_AES_128",
	[58] = "KMCTR_ENCRYPTED_AES_192",
	[59] = "KMCTR_ENCRYPTED_AES_256",
	[61] = "KMO_TDEA_128",
	[62] = "KMO_TDEA_192",
	[63] = "KMO_ENCRYPTED_DEA",
	[64] = "KMO_ENCRYPTED_TDEA_128",
	[65] = "KMO_ENCRYPTED_TDEA_192",
	[66] = "KMO_AES_128",
	[67] = "KMO_AES_192",
	[68] = "KMO_AES_256",
	[69] = "KMO_ENCRYPTED_AES_128",
	[70] = "KMO_ENCRYPTED_AES_192",
	[71] = "KMO_ENCRYPTED_AES_256",
	[73] = "KIMD_SHA_256",
	[74] = "KIMD_SHA_512",
	[75] = "KIMD_SHA3_224",
	[76] = "KIMD_SHA3_256",
	[77] = "KIMD_SHA3_384",
	[78] = "KIMD_SHA3_512",
	[79] = "KIMD_SHAKE_128",
	[80] = "KIMD_SHAKE_256",
	[83] = "KLMD_SHA_256",
	[84] = "KLMD_SHA_512",
	[85] = "KLMD_SHA3_224",
	[86] = "KLMD_SHA3_256",
	[87] = "KLMD_SHA3_384",
	[88] = "KLMD_SHA3_512",
	[89] = "KLMD_SHAKE_128",
	[90] = "KLMD_SHAKE_256",
	[92] = "KMAC_TDEA_128",
	[93] = "KMAC_TDEA_192",
	[94] = "KMAC_ENCRYPTED_DEA",
	[95] = "KMAC_ENCRYPTED_TDEA_128",
	[96] = "KMAC_ENCRYPTED_TDEA_192",
	[97] = "KMAC_AES_128",
	[98] = "KMAC_AES_192",
	[99] = "KMAC_AES_256",
	[100] = "KMAC_ENCRYPTED_AES_128",
	[101] = "KMAC_ENCRYPTED_AES_192",
	[102] = "KMAC_ENCRYPTED_AES_256",
	[103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
	[104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
	[105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
	[106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
	[107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
	[108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
	[109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
	[110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
	[111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
	[112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
	[113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
	[114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A",
	[115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
	[116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
	[117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
	[118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
	[119] = "PCC_SCALAR_MULTIPLY_P256",
	[120] = "PCC_SCALAR_MULTIPLY_P384",
	[121] = "PCC_SCALAR_MULTIPLY_P521",
	[122] = "PCC_SCALAR_MULTIPLY_ED25519",
	[123] = "PCC_SCALAR_MULTIPLY_ED448",
	[124] = "PCC_SCALAR_MULTIPLY_X25519",
	[125] = "PCC_SCALAR_MULTIPLY_X448",
	[126] = "PRNO_SHA_512_DRNG",
	[127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
	[129] = "KDSA_ECDSA_VERIFY_P256",
	[130] = "KDSA_ECDSA_VERIFY_P384",
	[131] = "KDSA_ECDSA_VERIFY_P521",
	[132] = "KDSA_ECDSA_SIGN_P256",
	[133] = "KDSA_ECDSA_SIGN_P384",
	[134] = "KDSA_ECDSA_SIGN_P521",
	[135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
	[136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
	[137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
	[138] = "KDSA_EDDSA_VERIFY_ED25519",
	[139] = "KDSA_EDDSA_VERIFY_ED448",
	[140] = "KDSA_EDDSA_SIGN_ED25519",
	[141] = "KDSA_EDDSA_SIGN_ED448",
	[142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
	[143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
	[144] = "PCKMO_ENCRYPT_DEA_KEY",
	[145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
	[146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
	[147] = "PCKMO_ENCRYPT_AES_128_KEY",
	[148] = "PCKMO_ENCRYPT_AES_192_KEY",
	[149] = "PCKMO_ENCRYPT_AES_256_KEY",
	[150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
	[151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
	[152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
	[153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
	[154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
	[155] = "IBM_RESERVED_155",
	[156] = "IBM_RESERVED_156",
};
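
/* Free the dynamically allocated event attributes again. */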
static void __init attr_event_free(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;
	int i;

	for (i = 0; i < num; i++) {
		struct device_attribute *dap;

		dap = container_of(attrs[i], struct device_attribute, attr);
		pa = container_of(dap, struct perf_pmu_events_attr, attr);
		kfree(pa);
	}
	kfree(attrs);
}
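
/* Allocate and initialize one sysfs attribute for PAI counter number 'num'. */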
static int __init attr_event_init_one(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;

	/* Index larger than array size, no counter name available */
	if (num >= ARRAY_SIZE(paicrypt_ctrnames)) {
		attrs[num] = NULL;
		return 0;
	}

	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
	if (!pa)
		return -ENOMEM;

	sysfs_attr_init(&pa->attr.attr);
	pa->id = PAI_CRYPTO_BASE + num;
	pa->attr.attr.name = paicrypt_ctrnames[num];
	pa->attr.attr.mode = 0444;
	pa->attr.show = cpumf_events_sysfs_show;
	pa->attr.store = NULL;
	attrs[num] = &pa->attr.attr;
	return 0;
}

/* Create PMU sysfs event attributes on the fly. */
static int __init attr_event_init(void)
{
	struct attribute **attrs;
	int ret, i;

	attrs = kmalloc_array(paicrypt_cnt + 2, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;
	for (i = 0; i <= paicrypt_cnt; i++) {
		ret = attr_event_init_one(attrs, i);
		if (ret) {
			attr_event_free(attrs, i);
			return ret;
		}
	}
	attrs[i] = NULL;
	paicrypt_events_group.attrs = attrs;
	return 0;
}

static int __init paicrypt_init(void)
{
	struct qpaci_info_block ib;
	int rc;

	if (!test_facility(196))
		return 0;

	qpaci(&ib);
	paicrypt_cnt = ib.num_cc;
	if (paicrypt_cnt == 0)
		return 0;
	if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR) {
		pr_err("Too many PMU pai_crypto counters %d\n", paicrypt_cnt);
		return -E2BIG;
	}

	rc = attr_event_init();		/* Export known PAI crypto events */
	if (rc) {
		pr_err("Creation of PMU pai_crypto /sysfs failed\n");
		return rc;
	}

	/* Setup s390dbf facility */
	cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
	if (!cfm_dbg) {
		pr_err("Registration of s390dbf pai_crypto failed\n");
		return -ENOMEM;
	}
	debug_register_view(cfm_dbg, &debug_sprintf_view);

	rc = perf_pmu_register(&paicrypt, "pai_crypto", -1);
	if (rc) {
		pr_err("Registering the pai_crypto PMU failed with rc=%i\n",
		       rc);
		debug_unregister_view(cfm_dbg, &debug_sprintf_view);
		debug_unregister(cfm_dbg);
		return rc;
	}
	return 0;
}

device_initcall(paicrypt_init);