// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support for s390x - CPU-measurement Counter Facility
 *
 * Copyright IBM Corp. 2012, 2017
 * Author(s): Hendrik Brueckner <[email protected]>
 */
#define KMSG_COMPONENT	"cpum_cf"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <asm/ctl_reg.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>

enum cpumf_ctr_set {
	CPUMF_CTR_SET_BASIC   = 0,	/* Basic Counter Set */
	CPUMF_CTR_SET_USER    = 1,	/* Problem-State Counter Set */
	CPUMF_CTR_SET_CRYPTO  = 2,	/* Crypto-Activity Counter Set */
	CPUMF_CTR_SET_EXT     = 3,	/* Extended Counter Set */
	CPUMF_CTR_SET_MT_DIAG = 4,	/* MT-diagnostic Counter Set */

	/* Maximum number of counter sets */
	CPUMF_CTR_SET_MAX,
};

#define CPUMF_LCCTL_ENABLE_SHIFT	16
#define CPUMF_LCCTL_ACTCTL_SHIFT	 0
static const u64 cpumf_state_ctl[CPUMF_CTR_SET_MAX] = {
	[CPUMF_CTR_SET_BASIC]	= 0x02,
	[CPUMF_CTR_SET_USER]	= 0x04,
	[CPUMF_CTR_SET_CRYPTO]	= 0x08,
	[CPUMF_CTR_SET_EXT]	= 0x01,
	[CPUMF_CTR_SET_MT_DIAG] = 0x20,
};

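/*
 * The per-CPU control state is built from the counter-set bits above:
 * ctr_set_enable/disable() place a set's bit in the enable-control part
 * of the state word (shifted by CPUMF_LCCTL_ENABLE_SHIFT), while
 * ctr_set_start/stop() place it in the activation-control part.  The
 * helpers only manipulate this software copy; lcctl() is issued
 * separately to apply it to the counter facility.
 */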
static void ctr_set_enable(u64 *state, int ctr_set)
{
	*state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT;
}
static void ctr_set_disable(u64 *state, int ctr_set)
{
	*state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT);
}
static void ctr_set_start(u64 *state, int ctr_set)
{
	*state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT;
}
static void ctr_set_stop(u64 *state, int ctr_set)
{
	*state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
}

/* Local CPUMF event structure */
struct cpu_hw_events {
	struct cpumf_ctr_info	info;
	atomic_t		ctr_set[CPUMF_CTR_SET_MAX];
	u64			state, tx_state;
	unsigned int		flags;
	unsigned int		txn_flags;
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.ctr_set = {
		[CPUMF_CTR_SET_BASIC]	= ATOMIC_INIT(0),
		[CPUMF_CTR_SET_USER]	= ATOMIC_INIT(0),
		[CPUMF_CTR_SET_CRYPTO]	= ATOMIC_INIT(0),
		[CPUMF_CTR_SET_EXT]	= ATOMIC_INIT(0),
		[CPUMF_CTR_SET_MT_DIAG] = ATOMIC_INIT(0),
	},
	.state = 0,
	.flags = 0,
	.txn_flags = 0,
};

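/*
 * Map a counter number to its counter set: basic (0-31), problem-state
 * (32-63), crypto-activity (64-127), extended (128-255) and
 * MT-diagnostic (448-495) counters.  Any other number yields
 * CPUMF_CTR_SET_MAX, i.e. no valid counter set.
 */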
static enum cpumf_ctr_set get_counter_set(u64 event)
{
	int set = CPUMF_CTR_SET_MAX;

	if (event < 32)
		set = CPUMF_CTR_SET_BASIC;
	else if (event < 64)
		set = CPUMF_CTR_SET_USER;
	else if (event < 128)
		set = CPUMF_CTR_SET_CRYPTO;
	else if (event < 256)
		set = CPUMF_CTR_SET_EXT;
	else if (event >= 448 && event < 496)
		set = CPUMF_CTR_SET_MT_DIAG;

	return set;
}

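/*
 * Check whether the counter-facility version information (cfvn/csvn as
 * reported by qctri()) is recent enough for the requested counter set
 * and counter number.
 */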
static int validate_ctr_version(const struct hw_perf_event *hwc)
{
	struct cpu_hw_events *cpuhw;
	int err = 0;
	u16 mtdiag_ctl;

	cpuhw = &get_cpu_var(cpu_hw_events);

	/* check required version for counter sets */
	switch (hwc->config_base) {
	case CPUMF_CTR_SET_BASIC:
	case CPUMF_CTR_SET_USER:
		if (cpuhw->info.cfvn < 1)
			err = -EOPNOTSUPP;
		break;
	case CPUMF_CTR_SET_CRYPTO:
	case CPUMF_CTR_SET_EXT:
		if (cpuhw->info.csvn < 1)
			err = -EOPNOTSUPP;
		if ((cpuhw->info.csvn == 1 && hwc->config > 159) ||
		    (cpuhw->info.csvn == 2 && hwc->config > 175) ||
		    (cpuhw->info.csvn  > 2 && hwc->config > 255))
			err = -EOPNOTSUPP;
		break;
	case CPUMF_CTR_SET_MT_DIAG:
		if (cpuhw->info.csvn <= 3)
			err = -EOPNOTSUPP;
		/*
		 * MT-diagnostic counters are read-only.  The counter set
		 * is automatically enabled and activated on all CPUs with
		 * multithreading (SMT).  Deactivation of multithreading
		 * also disables the counter set.  State changes are ignored
		 * by lcctl().  Because Linux controls SMT enablement through
		 * a kernel parameter only, the counter set is either disabled
		 * or enabled and active.
		 *
		 * Thus, the counters can only be used if SMT is on and the
		 * counter set is enabled and active.
		 */
		mtdiag_ctl = cpumf_state_ctl[CPUMF_CTR_SET_MT_DIAG];
		if (!((cpuhw->info.auth_ctl & mtdiag_ctl) &&
		      (cpuhw->info.enable_ctl & mtdiag_ctl) &&
		      (cpuhw->info.act_ctl & mtdiag_ctl)))
			err = -EOPNOTSUPP;
		break;
	}

	put_cpu_var(cpu_hw_events);
	return err;
}

static int validate_ctr_auth(const struct hw_perf_event *hwc)
{
	struct cpu_hw_events *cpuhw;
	u64 ctrs_state;
	int err = 0;

	cpuhw = &get_cpu_var(cpu_hw_events);

	/* Check authorization for CPU counter sets.
	 * If the particular CPU counter set is not authorized,
	 * return with -ENOENT in order to fall back to other
	 * PMUs that might satisfy the event request.
	 */
	ctrs_state = cpumf_state_ctl[hwc->config_base];
	if (!(ctrs_state & cpuhw->info.auth_ctl))
		err = -ENOENT;

	put_cpu_var(cpu_hw_events);
	return err;
}

/*
 * Change the CPUMF state to active.
 * Enable and activate the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cpumf_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	int err;

	if (cpuhw->flags & PMU_F_ENABLED)
		return;

	err = lcctl(cpuhw->state);
	if (err) {
		pr_err("Enabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}

	cpuhw->flags |= PMU_F_ENABLED;
}

/*
 * Change the CPUMF state to inactive.
 * Disable and enable (inactive) the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cpumf_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	int err;
	u64 inactive;

	if (!(cpuhw->flags & PMU_F_ENABLED))
		return;

	inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
	err = lcctl(inactive);
	if (err) {
		pr_err("Disabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}

	cpuhw->flags &= ~PMU_F_ENABLED;
}


/* Number of perf events counting hardware events */
static atomic_t num_events = ATOMIC_INIT(0);
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/* CPU-measurement alerts for the counter facility */
static void cpumf_measurement_alert(struct ext_code ext_code,
				    unsigned int alert, unsigned long unused)
{
	struct cpu_hw_events *cpuhw;

	if (!(alert & CPU_MF_INT_CF_MASK))
		return;

	inc_irq_stat(IRQEXT_CMC);
	cpuhw = this_cpu_ptr(&cpu_hw_events);

	/* Measurement alerts are shared and might happen when the PMU
	 * is not reserved.  Ignore these alerts in this case. */
	if (!(cpuhw->flags & PMU_F_RESERVED))
		return;

	/* counter authorization change alert */
	if (alert & CPU_MF_INT_CF_CACA)
		qctri(&cpuhw->info);

	/* loss of counter data alert */
	if (alert & CPU_MF_INT_CF_LCDA)
		pr_err("CPU[%i] Counter data was lost\n", smp_processor_id());

	/* loss of MT counter data alert */
	if (alert & CPU_MF_INT_CF_MTDA)
		pr_warn("CPU[%i] MT counter data was lost\n",
			smp_processor_id());
}

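/* setup_pmc_cpu() initializes or releases the counter facility on the
 * local CPU.  It runs with interrupts disabled, either via on_each_cpu()
 * or from the CPU hotplug callbacks further below.
 */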
#define PMC_INIT      0
#define PMC_RELEASE   1
static void setup_pmc_cpu(void *flags)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	switch (*((int *) flags)) {
	case PMC_INIT:
		memset(&cpuhw->info, 0, sizeof(cpuhw->info));
		qctri(&cpuhw->info);
		cpuhw->flags |= PMU_F_RESERVED;
		break;

	case PMC_RELEASE:
		cpuhw->flags &= ~PMU_F_RESERVED;
		break;
	}

	/* Disable CPU counter sets */
	lcctl(0);
}

/* Initialize the CPU-measurement facility */
static int reserve_pmc_hardware(void)
{
	int flags = PMC_INIT;

	on_each_cpu(setup_pmc_cpu, &flags, 1);
	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);

	return 0;
}

/* Release the CPU-measurement facility */
static void release_pmc_hardware(void)
{
	int flags = PMC_RELEASE;

	on_each_cpu(setup_pmc_cpu, &flags, 1);
	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
}

/* Release the PMU if event is the last perf event */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/* CPUMF <-> perf event mappings for kernel+userspace (basic set) */
static const int cpumf_generic_events_basic[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = 0,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = 1,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = -1,
	[PERF_COUNT_HW_CACHE_MISSES]	    = -1,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = -1,
	[PERF_COUNT_HW_BUS_CYCLES]	    = -1,
};
/* CPUMF <-> perf event mappings for userspace (problem-state set) */
static const int cpumf_generic_events_user[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = 32,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = 33,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = -1,
	[PERF_COUNT_HW_CACHE_MISSES]	    = -1,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = -1,
	[PERF_COUNT_HW_BUS_CYCLES]	    = -1,
};

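/* Translate the perf event attributes into a CPUMF counter number and
 * counter set, reserve the counter facility hardware on first use, and
 * validate authorization and version support for the counter set.
 */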
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	enum cpumf_ctr_set set;
	int err;
	u64 ev;

	switch (attr->type) {
	case PERF_TYPE_RAW:
		/* Raw events are used to access counters directly,
		 * hence do not permit excludes */
		if (attr->exclude_kernel || attr->exclude_user ||
		    attr->exclude_hv)
			return -EOPNOTSUPP;
		ev = attr->config;
		break;

	case PERF_TYPE_HARDWARE:
		if (is_sampling_event(event))	/* No sampling support */
			return -ENOENT;
		ev = attr->config;
		/* Count user space (problem-state) only */
		if (!attr->exclude_user && attr->exclude_kernel) {
			if (ev >= ARRAY_SIZE(cpumf_generic_events_user))
				return -EOPNOTSUPP;
			ev = cpumf_generic_events_user[ev];

		/* No support for kernel space counters only */
		} else if (!attr->exclude_kernel && attr->exclude_user) {
			return -EOPNOTSUPP;

		/* Count user and kernel space */
		} else {
			if (ev >= ARRAY_SIZE(cpumf_generic_events_basic))
				return -EOPNOTSUPP;
			ev = cpumf_generic_events_basic[ev];
		}
		break;

	default:
		return -ENOENT;
	}

	if (ev == -1)
		return -ENOENT;

	if (ev > PERF_CPUM_CF_MAX_CTR)
		return -ENOENT;

	/* Obtain the counter set to which the specified counter belongs */
	set = get_counter_set(ev);
	switch (set) {
	case CPUMF_CTR_SET_BASIC:
	case CPUMF_CTR_SET_USER:
	case CPUMF_CTR_SET_CRYPTO:
	case CPUMF_CTR_SET_EXT:
	case CPUMF_CTR_SET_MT_DIAG:
		/*
		 * Use the hardware perf event structure to store the
		 * counter number in the 'config' member and the counter
		 * set number in the 'config_base'.  The counter set number
		 * is then later used to enable/disable the counter(s).
		 */
		hwc->config = ev;
		hwc->config_base = set;
		break;
	case CPUMF_CTR_SET_MAX:
		/* The counter could not be associated to a counter set */
		return -EINVAL;
	}

	/* Initialize for using the CPU-measurement counter facility */
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	event->destroy = hw_perf_event_destroy;

	/* Finally, validate version and authorization of the counter set */
	err = validate_ctr_auth(hwc);
	if (!err)
		err = validate_ctr_version(hwc);

	return err;
}

static int cpumf_pmu_event_init(struct perf_event *event)
{
	int err;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
	case PERF_TYPE_RAW:
		err = __hw_perf_event_init(event);
		break;
	default:
		return -ENOENT;
	}

	if (unlikely(err) && event->destroy)
		event->destroy(event);

	return err;
}

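/* Synchronize event->hw.prev_count with the current hardware counter
 * value so that the next hw_perf_event_update() computes its delta
 * against a fresh baseline.
 */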
static int hw_perf_event_reset(struct perf_event *event)
{
	u64 prev, new;
	int err;

	do {
		prev = local64_read(&event->hw.prev_count);
		err = ecctr(event->hw.config, &new);
		if (err) {
			if (err != 3)
				break;
			/* The counter is not (yet) available. This
			 * might happen if the counter set to which
			 * this counter belongs is in the disabled
			 * state.
			 */
			new = 0;
		}
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	return err;
}

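/* Read the counter and accumulate the delta since the previous read,
 * taking a possible counter wrap into account.
 */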
static void hw_perf_event_update(struct perf_event *event)
{
	u64 prev, new, delta;
	int err;

	do {
		prev = local64_read(&event->hw.prev_count);
		err = ecctr(event->hw.config, &new);
		if (err)
			return;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	delta = (prev <= new) ? new - prev
			      : (-1ULL - prev) + new + 1;	/* overflow */
	local64_add(delta, &event->count);
}

static void cpumf_pmu_read(struct perf_event *event)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return;

	hw_perf_event_update(event);
}

static void cpumf_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(hwc->config == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* (Re-)enable and activate the counter set */
	ctr_set_enable(&cpuhw->state, hwc->config_base);
	ctr_set_start(&cpuhw->state, hwc->config_base);

	/* The counter set to which this counter belongs can be already active.
	 * Because all counters in a set are active, the event->hw.prev_count
	 * needs to be synchronized.  At this point, the counter set can be in
	 * the inactive or disabled state.
	 */
	hw_perf_event_reset(event);

	/* increment refcount for this counter set */
	atomic_inc(&cpuhw->ctr_set[hwc->config_base]);
}

static void cpumf_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* Decrement reference count for this counter set and if this
		 * is the last used counter in the set, clear activation
		 * control and set the counter set state to inactive.
		 */
		if (!atomic_dec_return(&cpuhw->ctr_set[hwc->config_base]))
			ctr_set_stop(&cpuhw->state, hwc->config_base);
		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		hw_perf_event_update(event);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static int cpumf_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	/* Check authorization for the counter set to which this
	 * counter belongs.
	 * For group events transaction, the authorization check is
	 * done in cpumf_pmu_commit_txn().
	 */
	if (!(cpuhw->txn_flags & PERF_PMU_TXN_ADD))
		if (validate_ctr_auth(&event->hw))
			return -ENOENT;

	ctr_set_enable(&cpuhw->state, event->hw.config_base);
	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		cpumf_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);

	return 0;
}

static void cpumf_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	cpumf_pmu_stop(event, PERF_EF_UPDATE);

	/* Check if any counter in the counter set is still used.  If not used,
	 * change the counter set to the disabled state.  This also clears the
	 * content of all counters in the set.
	 *
	 * When a new perf event has been added but not yet started, this can
	 * clear the enable control and reset all counters in a set.  Therefore,
	 * cpumf_pmu_start() always has to reenable a counter set.
	 */
	if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base]))
		ctr_set_disable(&cpuhw->state, event->hw.config_base);

	perf_event_update_userpage(event);
}

/*
 * Start group events scheduling transaction.
 * Set flags to perform a single test at commit time.
 *
 * We only support PERF_PMU_TXN_ADD transactions. Save the
 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
 * transactions.
 */
static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	WARN_ON_ONCE(cpuhw->txn_flags);		/* txn already in flight */

	cpuhw->txn_flags = txn_flags;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	perf_pmu_disable(pmu);
	cpuhw->tx_state = cpuhw->state;
}

/*
 * Stop and cancel a group events scheduling transaction.
 * Assumes cpumf_pmu_del() is called for each cpumf_pmu_add() that
 * succeeded during the transaction.
 */
static void cpumf_pmu_cancel_txn(struct pmu *pmu)
{
	unsigned int txn_flags;
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */

	txn_flags = cpuhw->txn_flags;
	cpuhw->txn_flags = 0;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	WARN_ON(cpuhw->tx_state != cpuhw->state);

	perf_pmu_enable(pmu);
}

/*
 * Commit the group events scheduling transaction.  On success, the
 * transaction is closed.  On error, the transaction is kept open
 * until cpumf_pmu_cancel_txn() is called.
 */
static int cpumf_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	u64 state;

	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */

	if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
		cpuhw->txn_flags = 0;
		return 0;
	}

	/* check if the updated state can be scheduled */
	state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
	state >>= CPUMF_LCCTL_ENABLE_SHIFT;
	if ((state & cpuhw->info.auth_ctl) != state)
		return -ENOENT;

	cpuhw->txn_flags = 0;
	perf_pmu_enable(pmu);
	return 0;
}

/* Performance monitoring unit for s390x */
static struct pmu cpumf_pmu = {
	.task_ctx_nr  = perf_sw_context,
	.capabilities = PERF_PMU_CAP_NO_INTERRUPT,
	.pmu_enable   = cpumf_pmu_enable,
	.pmu_disable  = cpumf_pmu_disable,
	.event_init   = cpumf_pmu_event_init,
	.add	      = cpumf_pmu_add,
	.del	      = cpumf_pmu_del,
	.start	      = cpumf_pmu_start,
	.stop	      = cpumf_pmu_stop,
	.read	      = cpumf_pmu_read,
	.start_txn    = cpumf_pmu_start_txn,
	.commit_txn   = cpumf_pmu_commit_txn,
	.cancel_txn   = cpumf_pmu_cancel_txn,
};

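/* CPU hotplug callbacks: initialize or release the counter facility for
 * a CPU that comes online or goes offline.
 */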
static int cpumf_pmf_setup(unsigned int cpu, int flags)
{
	local_irq_disable();
	setup_pmc_cpu(&flags);
	local_irq_enable();
	return 0;
}

static int s390_pmu_online_cpu(unsigned int cpu)
{
	return cpumf_pmf_setup(cpu, PMC_INIT);
}

static int s390_pmu_offline_cpu(unsigned int cpu)
{
	return cpumf_pmf_setup(cpu, PMC_RELEASE);
}

static int __init cpumf_pmu_init(void)
{
	int rc;

	if (!cpum_cf_avail())
		return -ENODEV;

	/* clear bit 15 of cr0 to unauthorize problem-state to
	 * extract measurement counters */
	ctl_clear_bit(0, 48);

	/* register handler for measurement-alert interruptions */
	rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
				   cpumf_measurement_alert);
	if (rc) {
		pr_err("Registering for CPU-measurement alerts "
		       "failed with rc=%i\n", rc);
		return rc;
	}

	cpumf_pmu.attr_groups = cpumf_cf_event_group();
	rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
	if (rc) {
		pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
		unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
					cpumf_measurement_alert);
		return rc;
	}
	return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
				 "perf/s390/cf:online",
				 s390_pmu_online_cpu, s390_pmu_offline_cpu);
}
early_initcall(cpumf_pmu_init);