// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance event support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>
#include <asm/hw_irq.h>
#include <asm/interrupt.h>

#ifdef CONFIG_PPC64
#include "internal.h"
#endif
#define BHRB_MAX_ENTRIES	32
#define BHRB_TARGET		0x0000000000000002
#define BHRB_PREDICTION		0x0000000000000001
#define BHRB_EA			0xFFFFFFFFFFFFFFFCUL
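
/*
 * A raw BHRB entry packs the branch effective address in bits 2..63
 * (BHRB_EA); bit 1 flags a "target" entry and bit 0 carries the
 * prediction (mispredict) status.
 */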
struct cpu_hw_events {
	int n_events;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8  pmcs_enabled;
	struct perf_event *event[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int flags[MAX_HWEVENTS];
	struct mmcr_regs mmcr;
	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];

	unsigned int txn_flags;
	int n_txn_start;

	/* BHRB bits */
	u64				bhrb_filter;	/* BHRB HW branch filter */
	unsigned int			bhrb_users;
	void				*bhrb_context;
	struct	perf_branch_stack	bhrb_stack;
	struct	perf_branch_entry	bhrb_entries[BHRB_MAX_ENTRIES];
	u64				ic_init;

	/* Store the PMC values */
	unsigned long pmcs[MAX_HWEVENTS];
};

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static struct power_pmu *ppmu;

/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_events_kernel = MMCR0_FCS;

/*
 * 32-bit doesn't have MMCRA but does have an MMCR2,
 * and a few other names are different.
 * Also 32-bit doesn't have MMCR3, SIER2 and SIER3.
 * Define them as zero knowing that any code path accessing
 * these registers (via mtspr/mfspr) is done under a ppmu flag
 * check for PPMU_ARCH_31 and we will not enter that code path
 * for 32-bit.
 */
#ifdef CONFIG_PPC32

#define MMCR0_FCHV		0
#define MMCR0_PMCjCE		MMCR0_PMCnCE
#define MMCR0_FC56		0
#define MMCR0_PMAO		0
#define MMCR0_EBE		0
#define MMCR0_BHRBA		0
#define MMCR0_PMCC		0
#define MMCR0_PMCC_U6		0

#define SPRN_MMCRA		SPRN_MMCR2
#define SPRN_MMCR3		0
#define SPRN_SIER2		0
#define SPRN_SIER3		0
#define MMCRA_SAMPLE_ENABLE	0
#define MMCRA_BHRB_DISABLE	0
#define MMCR0_PMCCEXT		0

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_read_regs(struct pt_regs *regs)
{
	regs->result = 0;
}

static inline int siar_valid(struct pt_regs *regs)
{
	return 1;
}

static bool is_ebb_event(struct perf_event *event) { return false; }
static int ebb_event_check(struct perf_event *event) { return 0; }
static void ebb_event_add(struct perf_event *event) { }
static void ebb_switch_out(unsigned long mmcr0) { }
static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
{
	return cpuhw->mmcr.mmcr0;
}

static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in) {}
static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
#endif /* CONFIG_PPC32 */

bool is_sier_available(void)
{
	if (!ppmu)
		return false;

	if (ppmu->flags & PPMU_HAS_SIER)
		return true;

	return false;
}

/*
 * Return the PMC value corresponding to the index passed.
 */
unsigned long get_pmcs_ext_regs(int idx)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	return cpuhw->pmcs[idx];
}

static bool regs_use_siar(struct pt_regs *regs)
{
	/*
	 * When we take a performance monitor exception the regs are setup
	 * using perf_read_regs() which overloads some fields, in particular
	 * regs->result to tell us whether to use SIAR.
	 *
	 * However if the regs are from another exception, eg. a syscall, then
	 * they have not been setup using perf_read_regs() and so regs->result
	 * is something random.
	 */
	return ((TRAP(regs) == INTERRUPT_PERFMON) && regs->result);
}

/*
 * Things that are specific to 64-bit implementations.
 */
#ifdef CONFIG_PPC64
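
/*
 * When instruction sampling is enabled, SIAR points at the start of the
 * sampled instruction's fetch group; MMCRA[SLOT] gives the instruction's
 * slot within that group, so the IP is adjusted by 4 bytes per slot
 * (instructions are 4 bytes on powerpc).
 */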
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if ((ppmu->flags & PPMU_HAS_SSLOT) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;

		if (slot > 1)
			return 4 * (slot - 1);
	}

	return 0;
}

/*
 * The user wants a data address recorded.
 * If we're not doing instruction sampling, give them the SDAR
 * (sampled data address).  If we are doing instruction sampling, then
 * only give them the SDAR if it corresponds to the instruction
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
 * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
 */
static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	bool sdar_valid;

	if (ppmu->flags & PPMU_HAS_SIER)
		sdar_valid = regs->dar & SIER_SDAR_VALID;
	else {
		unsigned long sdsync;

		if (ppmu->flags & PPMU_SIAR_VALID)
			sdsync = POWER7P_MMCRA_SDAR_VALID;
		else if (ppmu->flags & PPMU_ALT_SIPR)
			sdsync = POWER6_MMCRA_SDSYNC;
		else if (ppmu->flags & PPMU_NO_SIAR)
			sdsync = MMCRA_SAMPLE_ENABLE;
		else
			sdsync = MMCRA_SDSYNC;

		sdar_valid = mmcra & sdsync;
	}

	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
		*addrp = mfspr(SPRN_SDAR);

	if (is_kernel_addr(mfspr(SPRN_SDAR)) && event->attr.exclude_kernel)
		*addrp = 0;
}

static bool regs_sihv(struct pt_regs *regs)
{
	unsigned long sihv = MMCRA_SIHV;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIHV);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sihv = POWER6_MMCRA_SIHV;

	return !!(regs->dsisr & sihv);
}

static bool regs_sipr(struct pt_regs *regs)
{
	unsigned long sipr = MMCRA_SIPR;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIPR);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sipr = POWER6_MMCRA_SIPR;

	return !!(regs->dsisr & sipr);
}

static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return PERF_RECORD_MISC_USER;
	if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
		return PERF_RECORD_MISC_HYPERVISOR;
	return PERF_RECORD_MISC_KERNEL;
}

static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	bool use_siar = regs_use_siar(regs);
	unsigned long siar;
	unsigned long addr;

	if (!use_siar)
		return perf_flags_from_msr(regs);

	/*
	 * If we don't have flags in MMCRA, rather than using
	 * the MSR, we intuit the flags from the address in
	 * SIAR which should give slightly more reliable
	 * results
	 */
	if (ppmu->flags & PPMU_NO_SIPR) {
		siar = mfspr(SPRN_SIAR);
		if (is_kernel_addr(siar))
			return PERF_RECORD_MISC_KERNEL;
		return PERF_RECORD_MISC_USER;
	}

	/* PR has priority over HV, so order below is important */
	if (regs_sipr(regs)) {
		if (!(ppmu->flags & PPMU_P10))
			return PERF_RECORD_MISC_USER;
	} else if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
		return PERF_RECORD_MISC_HYPERVISOR;

	/*
	 * Check the address in SIAR to identify the
	 * privilege levels, since the SIER[MSR_HV, MSR_PR]
	 * bits are sometimes not set correctly on power10.
	 */
	if (ppmu->flags & PPMU_P10) {
		siar = mfspr(SPRN_SIAR);
		addr = siar ? siar : regs->nip;
		if (!is_kernel_addr(addr))
			return PERF_RECORD_MISC_USER;
	}

	return PERF_RECORD_MISC_KERNEL;
}

/*
 * Overload regs->dsisr to store MMCRA so we only need to read it once
 * on each interrupt.
 * Overload regs->dar to store SIER if we have it.
 * Overload regs->result to specify whether we should use the MSR (result
 * is zero) or the SIAR (result is non zero).
 */
static inline void perf_read_regs(struct pt_regs *regs)
{
	unsigned long mmcra = mfspr(SPRN_MMCRA);
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;
	int use_siar;

	regs->dsisr = mmcra;

	if (ppmu->flags & PPMU_HAS_SIER)
		regs->dar = mfspr(SPRN_SIER);

	/*
	 * If this isn't a PMU exception (eg a software event) the SIAR is
	 * not valid. Use pt_regs.
	 *
	 * If it is a marked event use the SIAR.
	 *
	 * If the PMU doesn't update the SIAR for non marked events use
	 * pt_regs.
	 *
	 * If regs is a kernel interrupt, always use SIAR. Some PMUs have an
	 * issue with regs_sipr not being in synch with SIAR in interrupt entry
	 * and return sequences, which can result in regs_sipr being true for
	 * kernel interrupts and SIAR, which has the effect of causing samples
	 * to pile up at mtmsrd MSR[EE] 0->1 or pending irq replay around
	 * interrupt entry/exit.
	 *
	 * If the PMU has HV/PR flags then check to see if they
	 * place the exception in userspace. If so, use pt_regs. In
	 * continuous sampling mode the SIAR and the PMU exception are
	 * not synchronised, so they may be many instructions apart.
	 * This can result in confusing backtraces. We still want
	 * hypervisor samples as well as samples in the kernel with
	 * interrupts off hence the userspace check.
	 */
	if (TRAP(regs) != INTERRUPT_PERFMON)
		use_siar = 0;
	else if ((ppmu->flags & PPMU_NO_SIAR))
		use_siar = 0;
	else if (marked)
		use_siar = 1;
	else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
		use_siar = 0;
	else if (!user_mode(regs))
		use_siar = 1;
	else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
		use_siar = 0;
	else
		use_siar = 1;

	regs->result = use_siar;
}

/*
 * On processors like P7+ that have the SIAR-Valid bit, marked instructions
 * must be sampled only if the SIAR-valid bit is set.
 *
 * For unmarked instructions and for processors that don't have the SIAR-Valid
 * bit, assume that SIAR is valid.
 */
static inline int siar_valid(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;

	if (marked) {
		/*
		 * SIER[SIAR_VALID] is not set for some
		 * marked events on power10 DD1, so drop
		 * the check for SIER[SIAR_VALID] and return true.
		 */
		if (ppmu->flags & PPMU_P10_DD1)
			return 0x1;
		else if (ppmu->flags & PPMU_HAS_SIER)
			return regs->dar & SIER_SIAR_VALID;

		if (ppmu->flags & PPMU_SIAR_VALID)
			return mmcra & POWER7P_MMCRA_SIAR_VALID;
	}

	return 1;
}

/* Reset all possible BHRB entries */
static void power_pmu_bhrb_reset(void)
{
	asm volatile(PPC_CLRBHRB);
}

static void power_pmu_bhrb_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	if (!ppmu->bhrb_nr)
		return;

	/* Clear BHRB if we changed task context to avoid data leaks */
	if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
		power_pmu_bhrb_reset();
		cpuhw->bhrb_context = event->ctx;
	}
	cpuhw->bhrb_users++;
	perf_sched_cb_inc(event->pmu);
}

static void power_pmu_bhrb_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	if (!ppmu->bhrb_nr)
		return;

	WARN_ON_ONCE(!cpuhw->bhrb_users);
	cpuhw->bhrb_users--;
	perf_sched_cb_dec(event->pmu);

	if (!cpuhw->disabled && !cpuhw->bhrb_users) {
		/* BHRB cannot be turned off when other
		 * events are active on the PMU.
		 */

		/* avoid stale pointer */
		cpuhw->bhrb_context = NULL;
	}
}

/* Called from ctxsw to prevent one process's branch entries from
 * mingling with the other process's entries during context switch.
 */
static void power_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{
	if (!ppmu->bhrb_nr)
		return;

	if (sched_in)
		power_pmu_bhrb_reset();
}

/* Calculate the to address for a branch */
static __u64 power_pmu_bhrb_to(u64 addr)
{
	unsigned int instr;
	__u64 target;

	if (is_kernel_addr(addr)) {
		if (copy_from_kernel_nofault(&instr, (void *)addr,
				sizeof(instr)))
			return 0;

		return branch_target(&instr);
	}

	/* Userspace: need copy instruction here then translate it */
	if (copy_from_user_nofault(&instr, (unsigned int __user *)addr,
			sizeof(instr)))
		return 0;

	target = branch_target(&instr);
	if ((!target) || (instr & BRANCH_ABSOLUTE))
		return target;

	/* Translate relative branch target from kernel to user address */
	return target - (unsigned long)&instr + addr;
}
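
/*
 * Note on the translation above: branch_target() resolves a relative
 * branch against the copied instruction's own kernel address, so for a
 * userspace branch we rebase the result by (addr - &instr) to recover
 * the user-space target.
 */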

/* Processing BHRB entries */
static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw)
{
	u64 val;
	u64 addr;
	int r_index, u_index, pred;

	r_index = 0;
	u_index = 0;
	while (r_index < ppmu->bhrb_nr) {
		/* Assembly read function */
		val = read_bhrb(r_index++);
		if (!val)
			/* Terminal marker: End of valid BHRB entries */
			break;
		else {
			addr = val & BHRB_EA;
			pred = val & BHRB_PREDICTION;

			if (!addr)
				/* invalid entry */
				continue;

			/*
			 * The BHRB rolling buffer could very much contain kernel
			 * addresses at this point. Check the privileges before
			 * exporting it to userspace (avoid exposure of regions
			 * where we could have speculative execution).
			 * In case of ISA v3.1, BHRB will capture only user-space
			 * addresses, hence include a check before filtering code.
			 */
			if (!(ppmu->flags & PPMU_ARCH_31) &&
			    is_kernel_addr(addr) && event->attr.exclude_kernel)
				continue;

			/* Branches are read most recent first (ie. mfbhrb 0 is
			 * the most recent branch).
			 * There are two types of valid entries:
			 * 1) a target entry which is the to address of a
			 *    computed goto like a blr,bctr,btar.  The next
			 *    entry read from the bhrb will be the branch
			 *    corresponding to this target (ie. the actual
			 *    blr/bctr/btar instruction).
			 * 2) a from address which is an actual branch.  If a
			 *    target entry precedes this, then this is the
			 *    matching branch for that target.  If this is not
			 *    following a target entry, then this is a branch
			 *    where the target is given as an immediate field
			 *    in the instruction (ie. an i or b form branch).
			 *    In this case we need to read the instruction from
			 *    memory to determine the target/to address.
			 */
			if (val & BHRB_TARGET) {
				/* Target branches use two entries
				 * (ie. computed gotos/XL form)
				 */
				cpuhw->bhrb_entries[u_index].to = addr;
				cpuhw->bhrb_entries[u_index].mispred = pred;
				cpuhw->bhrb_entries[u_index].predicted = ~pred;

				/* Get from address in next entry */
				val = read_bhrb(r_index++);
				addr = val & BHRB_EA;
				if (val & BHRB_TARGET) {
					/* Shouldn't have two targets in a
					   row.. Reset index and try again */
					r_index--;
					addr = 0;
				}
				cpuhw->bhrb_entries[u_index].from = addr;
			} else {
				/* Branches to immediate field
				   (ie I or B form) */
				cpuhw->bhrb_entries[u_index].from = addr;
				cpuhw->bhrb_entries[u_index].to =
					power_pmu_bhrb_to(addr);
				cpuhw->bhrb_entries[u_index].mispred = pred;
				cpuhw->bhrb_entries[u_index].predicted = ~pred;
			}
			u_index++;
		}
	}
	cpuhw->bhrb_stack.nr = u_index;
	cpuhw->bhrb_stack.hw_idx = -1ULL;
	return;
}
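
/*
 * hw_idx = -1ULL above tells the perf core that this PMU does not
 * provide a stable hardware index for its branch stack entries.
 */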

static bool is_ebb_event(struct perf_event *event)
{
	/*
	 * This could be a per-PMU callback, but we'd rather avoid the cost. We
	 * check that the PMU supports EBB, meaning those that don't can still
	 * use bit 63 of the event code for something else if they wish.
	 */
	return (ppmu->flags & PPMU_ARCH_207S) &&
	       ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
}

static int ebb_event_check(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;

	/* Event and group leader must agree on EBB */
	if (is_ebb_event(leader) != is_ebb_event(event))
		return -EINVAL;

	if (is_ebb_event(event)) {
		if (!(event->attach_state & PERF_ATTACH_TASK))
			return -EINVAL;

		if (!leader->attr.pinned || !leader->attr.exclusive)
			return -EINVAL;

		if (event->attr.freq ||
		    event->attr.inherit ||
		    event->attr.sample_type ||
		    event->attr.sample_period ||
		    event->attr.enable_on_exec)
			return -EINVAL;
	}

	return 0;
}

static void ebb_event_add(struct perf_event *event)
{
	if (!is_ebb_event(event) || current->thread.used_ebb)
		return;

	/*
	 * IFF this is the first time we've added an EBB event, set
	 * PMXE in the user MMCR0 so we can detect when it's cleared by
	 * userspace. We need this so that we can context switch while
	 * userspace is in the EBB handler (where PMXE is 0).
	 */
	current->thread.used_ebb = 1;
	current->thread.mmcr0 |= MMCR0_PMXE;
}

static void ebb_switch_out(unsigned long mmcr0)
{
	if (!(mmcr0 & MMCR0_EBE))
		return;

	current->thread.siar  = mfspr(SPRN_SIAR);
	current->thread.sier  = mfspr(SPRN_SIER);
	current->thread.sdar  = mfspr(SPRN_SDAR);
	current->thread.mmcr0 = mmcr0 & MMCR0_USER_MASK;
	current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;
	if (ppmu->flags & PPMU_ARCH_31) {
		current->thread.mmcr3 = mfspr(SPRN_MMCR3);
		current->thread.sier2 = mfspr(SPRN_SIER2);
		current->thread.sier3 = mfspr(SPRN_SIER3);
	}
}

static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
{
	unsigned long mmcr0 = cpuhw->mmcr.mmcr0;

	if (!ebb)
		goto out;

	/* Enable EBB and read/write to all 6 PMCs and BHRB for userspace */
	mmcr0 |= MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC_U6;

	/*
	 * Add any bits from the user MMCR0, FC or PMAO. This is compatible
	 * with pmao_restore_workaround() because we may add PMAO but we never
	 * clear it here.
	 */
	mmcr0 |= current->thread.mmcr0;

	/*
	 * Be careful not to set PMXE if userspace had it cleared. This is also
	 * compatible with pmao_restore_workaround() because it has already
	 * cleared PMXE and we leave PMAO alone.
	 */
	if (!(current->thread.mmcr0 & MMCR0_PMXE))
		mmcr0 &= ~MMCR0_PMXE;

	mtspr(SPRN_SIAR, current->thread.siar);
	mtspr(SPRN_SIER, current->thread.sier);
	mtspr(SPRN_SDAR, current->thread.sdar);

	/*
	 * Merge the kernel & user values of MMCR2. The semantics we implement
	 * are that the user MMCR2 can set bits, ie. cause counters to freeze,
	 * but not clear bits. If a task wants to be able to clear bits, ie.
	 * unfreeze counters, it should not set exclude_xxx in its events and
	 * instead manage the MMCR2 entirely by itself.
	 */
	mtspr(SPRN_MMCR2, cpuhw->mmcr.mmcr2 | current->thread.mmcr2);

	if (ppmu->flags & PPMU_ARCH_31) {
		mtspr(SPRN_MMCR3, current->thread.mmcr3);
		mtspr(SPRN_SIER2, current->thread.sier2);
		mtspr(SPRN_SIER3, current->thread.sier3);
	}
out:
	return mmcr0;
}

static void pmao_restore_workaround(bool ebb)
{
	unsigned long pmcs[6];

	if (!cpu_has_feature(CPU_FTR_PMAO_BUG))
		return;

	/*
	 * On POWER8E there is a hardware defect which affects the PMU context
	 * switch logic, ie. power_pmu_disable/enable().
	 *
	 * When a counter overflows PMXE is cleared and FC/PMAO is set in MMCR0
	 * by the hardware. Sometime later the actual PMU exception is
	 * delivered.
	 *
	 * If we context switch, or simply disable/enable, the PMU prior to the
	 * exception arriving, the exception will be lost when we clear PMAO.
	 *
	 * When we reenable the PMU, we will write the saved MMCR0 with PMAO
	 * set, and this _should_ generate an exception. However because of the
	 * defect no exception is generated when we write PMAO, and we get
	 * stuck with no counters counting but no exception delivered.
	 *
	 * The workaround is to detect this case and tweak the hardware to
	 * create another pending PMU exception.
	 *
	 * We do that by setting up PMC6 (cycles) for an imminent overflow and
	 * enabling the PMU. That causes a new exception to be generated in the
	 * chip, but we don't take it yet because we have interrupts hard
	 * disabled. We then write back the PMU state as we want it to be seen
	 * by the exception handler. When we reenable interrupts the exception
	 * handler will be called and see the correct state.
	 *
	 * The logic is the same for EBB, except that the exception is gated by
	 * us having interrupts hard disabled as well as the fact that we are
	 * not in userspace. The exception is finally delivered when we return
	 * to userspace.
	 */

	/* Only if PMAO is set and PMAO_SYNC is clear */
	if ((current->thread.mmcr0 & (MMCR0_PMAO | MMCR0_PMAO_SYNC)) != MMCR0_PMAO)
		return;

	/* If we're doing EBB, only if BESCR[GE] is set */
	if (ebb && !(current->thread.bescr & BESCR_GE))
		return;

	/*
	 * We are already soft-disabled in power_pmu_enable(). We need to hard
	 * disable to actually prevent the PMU exception from firing.
	 */
	hard_irq_disable();

	/*
	 * This is a bit gross, but we know we're on POWER8E and have 6 PMCs.
	 * Using read/write_pmc() in a for loop adds 12 function calls and
	 * almost doubles our code size.
	 */
	pmcs[0] = mfspr(SPRN_PMC1);
	pmcs[1] = mfspr(SPRN_PMC2);
	pmcs[2] = mfspr(SPRN_PMC3);
	pmcs[3] = mfspr(SPRN_PMC4);
	pmcs[4] = mfspr(SPRN_PMC5);
	pmcs[5] = mfspr(SPRN_PMC6);

	/* Ensure all freeze bits are unset */
	mtspr(SPRN_MMCR2, 0);

	/* Set up PMC6 to overflow in one cycle */
	mtspr(SPRN_PMC6, 0x7FFFFFFE);

	/* Enable exceptions and unfreeze PMC6 */
	mtspr(SPRN_MMCR0, MMCR0_PMXE | MMCR0_PMCjCE | MMCR0_PMAO);

	/* Now we need to refreeze and restore the PMCs */
	mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMAO);

	mtspr(SPRN_PMC1, pmcs[0]);
	mtspr(SPRN_PMC2, pmcs[1]);
	mtspr(SPRN_PMC3, pmcs[2]);
	mtspr(SPRN_PMC4, pmcs[3]);
	mtspr(SPRN_PMC5, pmcs[4]);
	mtspr(SPRN_PMC6, pmcs[5]);
}

/*
 * If the perf subsystem wants performance monitor interrupts as soon as
 * possible (e.g., to sample the instruction address and stack chain),
 * this should return true. The IRQ masking code can then enable MSR[EE]
 * in some places (e.g., interrupt handlers) that allows PMI interrupts
 * through to improve accuracy of profiles, at the cost of some performance.
 *
 * The PMU counters can be enabled by other means (e.g., sysfs raw SPR
 * access), but in that case there is no need for prompt PMI handling.
 *
 * This currently returns true if any perf counter is being used. It
 * could possibly return false if only events are being counted rather than
 * samples being taken, but for now this is good enough.
 */
bool power_pmu_wants_prompt_pmi(void)
{
	struct cpu_hw_events *cpuhw;

	/*
	 * This could simply test local_paca->pmcregs_in_use if that were not
	 * under ifdef KVM.
	 */
	if (!ppmu)
		return false;

	cpuhw = this_cpu_ptr(&cpu_hw_events);
	return cpuhw->n_events;
}
#endif /* CONFIG_PPC64 */

static void perf_event_interrupt(struct pt_regs *regs);

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}
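
/*
 * Check all active events for a PMC whose value has gone negative,
 * i.e. has overflowed (bit 31 set). Returns the overflown PMC index,
 * or 0 if none.
 */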
static int any_pmc_overflown(struct cpu_hw_events *cpuhw)
{
	int i, idx;

	for (i = 0; i < cpuhw->n_events; i++) {
		idx = cpuhw->event[i]->hw.idx;
		if ((idx) && ((int)read_pmc(idx) < 0))
			return idx;
	}

	return 0;
}

/* Called from sysrq_handle_showregs() */
void perf_event_print_debug(void)
{
	unsigned long sdar, sier, flags;
	u32 pmcs[MAX_HWEVENTS];
	int i;

	if (!ppmu) {
		pr_info("Performance monitor hardware not registered.\n");
		return;
	}

	if (!ppmu->n_counter)
		return;

	local_irq_save(flags);

	pr_info("CPU: %d PMU registers, ppmu = %s n_counters = %d",
		 smp_processor_id(), ppmu->name, ppmu->n_counter);

	for (i = 0; i < ppmu->n_counter; i++)
		pmcs[i] = read_pmc(i + 1);

	for (; i < MAX_HWEVENTS; i++)
		pmcs[i] = 0xdeadbeef;

	pr_info("PMC1:  %08x PMC2: %08x PMC3: %08x PMC4: %08x\n",
		 pmcs[0], pmcs[1], pmcs[2], pmcs[3]);

	if (ppmu->n_counter > 4)
		pr_info("PMC5:  %08x PMC6: %08x PMC7: %08x PMC8: %08x\n",
			 pmcs[4], pmcs[5], pmcs[6], pmcs[7]);

	pr_info("MMCR0: %016lx MMCR1: %016lx MMCRA: %016lx\n",
		mfspr(SPRN_MMCR0), mfspr(SPRN_MMCR1), mfspr(SPRN_MMCRA));

	sdar = sier = 0;
#ifdef CONFIG_PPC64
	sdar = mfspr(SPRN_SDAR);

	if (ppmu->flags & PPMU_HAS_SIER)
		sier = mfspr(SPRN_SIER);

	if (ppmu->flags & PPMU_ARCH_207S) {
		pr_info("MMCR2: %016lx EBBHR: %016lx\n",
			mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR));
		pr_info("EBBRR: %016lx BESCR: %016lx\n",
			mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR));
	}

	if (ppmu->flags & PPMU_ARCH_31) {
		pr_info("MMCR3: %016lx SIER2: %016lx SIER3: %016lx\n",
			mfspr(SPRN_MMCR3), mfspr(SPRN_SIER2), mfspr(SPRN_SIER3));
	}
#endif
	pr_info("SIAR:  %016lx SDAR:  %016lx SIER:  %016lx\n",
		mfspr(SPRN_SIAR), sdar, sier);

	local_irq_restore(flags);
}

/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event_id[].
 */
static int power_check_constraints(struct cpu_hw_events *cpuhw,
				   u64 event_id[], unsigned int cflags[],
				   int n_ev, struct perf_event **event)
{
	unsigned long mask, value, nv;
	unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
	int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;
	unsigned long grp_mask = ppmu->group_constraint_mask;
	unsigned long grp_val = ppmu->group_constraint_val;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event_id[i])) {
			ppmu->get_alternatives(event_id[i], cflags[i],
					       cpuhw->alternatives[i]);
			event_id[i] = cpuhw->alternatives[i][0];
		}
		if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
					 &cpuhw->avalues[i][0], event[i]->attr.config1))
			return -1;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | cpuhw->avalues[i][0]) +
			(value & cpuhw->avalues[i][0] & addf);

		if (((((nv + tadd) ^ value) & mask) & (~grp_mask)) != 0)
			break;

		if (((((nv + tadd) ^ cpuhw->avalues[i][0]) & cpuhw->amasks[i][0])
			& (~grp_mask)) != 0)
			break;

		value = nv;
		mask |= cpuhw->amasks[i][0];
	}
	if (i == n_ev) {
		if ((value & mask & grp_mask) != (mask & grp_val))
			return -1;
		else
			return 0;	/* all OK */
	}

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
						  cpuhw->alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(cpuhw->alternatives[i][j],
					     &cpuhw->amasks[i][j],
					     &cpuhw->avalues[i][j],
					     event[i]->attr.config1);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (1) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event_id i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | cpuhw->avalues[i][j]) +
				(value & cpuhw->avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ cpuhw->avalues[i][j])
			     & cpuhw->amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event_id i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event_id i,
			 * remember where we got up to with this event_id,
			 * go on to the next event_id, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= cpuhw->amasks[i][j];
			if (++i == n_ev)
				break;	/* all OK */
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event_id[i] = cpuhw->alternatives[i][choice[i]];
	return 0;
}
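
/*
 * A note on the constraint arithmetic above: each event's constraint
 * "value" packs small per-resource usage counts into disjoint bit
 * fields of one word. Adding (value & avalue & addf) propagates a
 * carry within a shared field when two events use the same resource,
 * and the ((nv + tadd) ^ value) & mask test detects a carry escaping
 * its field, i.e. a resource being over-committed.
 */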

/*
 * Check if newly-added events have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added events.
 */
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_event *event;

	/*
	 * If the PMU we're on supports per event exclude settings then we
	 * don't need to do any of this logic. NB. This assumes no PMU has both
	 * per event exclude and limited PMCs.
	 */
	if (ppmu->flags & PPMU_ARCH_207S)
		return 0;

	n = n_prev + n_new;
	first = 1;

	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		event = ctrs[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}

static u64 check_and_compute_delta(u64 prev, u64 val)
{
	u64 delta = (val - prev) & 0xfffffffful;

	/*
	 * POWER7 can roll back counter values, if the new value is smaller
	 * than the previous value it will cause the delta and the counter to
	 * have bogus values unless we rolled a counter over.  If a counter is
	 * rolled back, it will be smaller, but within 256, which is the maximum
	 * number of events to rollback at once.  If we detect a rollback
	 * return 0.  This can lead to a small lack of precision in the
	 * counters.
	 */
	if (prev > val && (prev - val) < 256)
		delta = 0;

	return delta;
}
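
/*
 * For example: prev = 0xfffffff0, val = 0x10 is a genuine 32-bit wrap
 * and yields delta = 0x20, while prev = 0x100, val = 0x80 differs by
 * less than 256 and is treated as a rollback, yielding delta = 0.
 */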

static void power_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	if (!event->hw.idx)
		return;

	if (is_ebb_event(event)) {
		val = read_pmc(event->hw.idx);
		local64_set(&event->hw.prev_count, val);
		return;
	}

	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
		delta = check_and_compute_delta(prev, val);
		if (!delta)
			return;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);

	/*
	 * A number of places program the PMC with (0x80000000 - period_left).
	 * We never want period_left to be less than 1 because we will program
	 * the PMC with a value >= 0x80000000 and an edge detected PMC will
	 * roll around to 0 before taking an exception. We have seen this
	 * on POWER8.
	 *
	 * To fix this, clamp the minimum value of period_left to 1.
	 */
	do {
		prev = local64_read(&event->hw.period_left);
		val = prev - delta;
		if (val < 1)
			val = 1;
	} while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
}

/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `event' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}

static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		if (!event->hw.idx)
			continue;
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		event->hw.idx = 0;
		delta = check_and_compute_delta(prev, val);
		if (delta)
			local64_add(delta, &event->count);
	}
}

static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		event->hw.idx = cpuhw->limited_hwidx[i];
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		if (check_and_compute_delta(prev, val))
			local64_set(&event->hw.prev_count, val);
		perf_event_update_userpage(event);
	}
}

/*
 * Since limited events don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other events.  We try to keep the values from the limited
 * events as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited events as small and consistent as possible.
 * Therefore, if any limited events are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * events, we first write MMCR0 with the event overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the event overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}

/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
static void power_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags, mmcr0, val, mmcra;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = this_cpu_ptr(&cpu_hw_events);

	if (!cpuhw->disabled) {
		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			ppc_enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Set the 'freeze counters' bit, clear EBE/BHRBA/PMCC/PMAO/FC56.
		 * Also clear PMXE to disable PMIs getting triggered in some
		 * corner cases during PMU disable.
		 */
		val = mmcr0 = mfspr(SPRN_MMCR0);
		val |= MMCR0_FC;
		val &= ~(MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC | MMCR0_PMAO |
			 MMCR0_PMXE | MMCR0_FC56);
		/* Set mmcr0 PMCCEXT for p10 */
		if (ppmu->flags & PPMU_ARCH_31)
			val |= MMCR0_PMCCEXT;

		/*
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the events etc.
		 * before we return.
		 */
		write_mmcr0(cpuhw, val);
		mb();
		isync();

		/*
		 * Some corner cases could clear the PMU counter overflow
		 * while a masked PMI is pending. One such case is when
		 * a PMI happens during interrupt replay and perf counter
		 * values are cleared by PMU callbacks before replay.
		 *
		 * Disable the interrupt by clearing the paca bit for PMI
		 * since we are disabling the PMU now. Otherwise provide a
		 * warning if there is PMI pending, but no counter is found
		 * overflown.
		 *
		 * Since power_pmu_disable runs under local_irq_save, it
		 * could happen that code hits a PMC overflow without PMI
		 * pending in paca. Hence only clear PMI pending if it was
		 * set.
		 *
		 * If a PMI is pending, then MSR[EE] must be disabled (because
		 * the masked PMI handler disables EE). So it is safe to
		 * call clear_pmi_irq_pending().
		 */
		if (pmi_irq_pending())
			clear_pmi_irq_pending();

		val = mmcra = cpuhw->mmcr.mmcra;

		/*
		 * Disable instruction sampling if it was enabled
		 */
		val &= ~MMCRA_SAMPLE_ENABLE;

		/* Disable BHRB via mmcra (BHRBRD) for p10 */
		if (ppmu->flags & PPMU_ARCH_31)
			val |= MMCRA_BHRB_DISABLE;

		/*
		 * Write SPRN_MMCRA if mmcra has either disabled
		 * instruction sampling or BHRB.
		 */
		if (val != mmcra) {
			mtspr(SPRN_MMCRA, val);
			mb();
			isync();
		}

		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		ebb_switch_out(mmcr0);

#ifdef CONFIG_PPC64
		/*
		 * These are readable by userspace, may contain kernel
		 * addresses and are not switched by context switch, so clear
		 * them now to avoid leaking anything to userspace in general
		 * including to another process.
		 */
		if (ppmu->flags & PPMU_ARCH_207S) {
			mtspr(SPRN_SDAR, 0);
			mtspr(SPRN_SIAR, 0);
		}
#endif
	}

	local_irq_restore(flags);
}

/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
static void power_pmu_enable(struct pmu *pmu)
{
	struct perf_event *event;
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val, mmcr0;
	s64 left;
	unsigned int hwc_index[MAX_HWEVENTS];
	int n_lim;
	int idx;
	bool ebb;

	if (!ppmu)
		return;
	local_irq_save(flags);

	cpuhw = this_cpu_ptr(&cpu_hw_events);
	if (!cpuhw->disabled)
		goto out;

	if (cpuhw->n_events == 0) {
		ppc_set_pmu_inuse(0);
		goto out;
	}

	cpuhw->disabled = 0;

	/*
	 * EBB requires an exclusive group and all events must have the EBB
	 * flag set, or not set, so we can just check a single event. Also we
	 * know we have at least one event.
	 */
	ebb = is_ebb_event(cpuhw->event[0]);

	/*
	 * If we didn't change anything, or only removed events,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of events).
	 */
	if (!cpuhw->n_added) {
		/*
		 * If there is any active event with an overflown PMC
		 * value, set back PACA_IRQ_PMI which would have been
		 * cleared in power_pmu_disable().
		 */
		hard_irq_disable();
		if (any_pmc_overflown(cpuhw))
			set_pmi_irq_pending();

		mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1);
		if (ppmu->flags & PPMU_ARCH_31)
			mtspr(SPRN_MMCR3, cpuhw->mmcr.mmcr3);
		goto out_enable;
	}

	/*
	 * Clear all MMCR settings and recompute them for the new set of events.
	 */
	memset(&cpuhw->mmcr, 0, sizeof(cpuhw->mmcr));

	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
			       &cpuhw->mmcr, cpuhw->event, ppmu->flags)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	if (!(ppmu->flags & PPMU_ARCH_207S)) {
		/*
		 * Add in MMCR0 freeze bits corresponding to the attr.exclude_*
		 * bits for the first event. We have already checked that all
		 * events have the same value for these bits as the first event.
		 */
		event = cpuhw->event[0];
		if (event->attr.exclude_user)
			cpuhw->mmcr.mmcr0 |= MMCR0_FCP;
		if (event->attr.exclude_kernel)
			cpuhw->mmcr.mmcr0 |= freeze_events_kernel;
		if (event->attr.exclude_hv)
			cpuhw->mmcr.mmcr0 |= MMCR0_FCHV;
	}

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware events to their initial values.
	 * Then unfreeze the events.
	 */
	ppc_set_pmu_inuse(1);
	mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr.mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
				| MMCR0_FC);
	if (ppmu->flags & PPMU_ARCH_207S)
		mtspr(SPRN_MMCR2, cpuhw->mmcr.mmcr2);

	if (ppmu->flags & PPMU_ARCH_31)
		mtspr(SPRN_MMCR3, cpuhw->mmcr.mmcr3);

	/*
	 * Read off any pre-existing events that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(event);
			write_pmc(event->hw.idx, 0);
			event->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved events.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = event;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}

		if (ebb)
			val = local64_read(&event->hw.prev_count);
		else {
			val = 0;
			if (event->hw.sample_period) {
				left = local64_read(&event->hw.period_left);
				if (left < 0x80000000L)
					val = 0x80000000L - left;
			}
			local64_set(&event->hw.prev_count, val);
		}

		event->hw.idx = idx;
		if (event->hw.state & PERF_HES_STOPPED)
			val = 0;
		write_pmc(idx, val);

		perf_event_update_userpage(event);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr.mmcr0 |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	pmao_restore_workaround(ebb);

	mmcr0 = ebb_switch_in(ebb, cpuhw);

	mb();
	if (cpuhw->bhrb_users)
		ppmu->config_bhrb(cpuhw->bhrb_filter);

	write_mmcr0(cpuhw, mmcr0);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra);
	}

 out:

	local_irq_restore(flags);
}

static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *ctrs[], u64 *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_event *event;

	if (group->pmu->task_ctx_nr == perf_hw_context) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.event_base;
		events[n++] = group->hw.config;
	}
	for_each_sibling_event(event, group) {
		if (event->pmu->task_ctx_nr == perf_hw_context &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = event;
			flags[n] = event->hw.event_base;
			events[n++] = event->hw.config;
		}
	}
	return n;
}

/*
 * Add an event to the PMU.
 * If all events are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_add(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	/*
	 * Add the event to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = this_cpu_ptr(&cpu_hw_events);
	n0 = cpuhw->n_events;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->event[n0] = event;
	cpuhw->events[n0] = event->hw.config;
	cpuhw->flags[n0] = event->hw.event_base;

	/*
	 * This event may have been disabled/stopped in record_and_restart()
	 * because we exceeded the ->event_limit. If re-starting the event,
	 * clear the ->hw.state (STOPPED and UPTODATE flags), so the user
	 * notification is re-enabled.
	 */
	if (!(ef_flags & PERF_EF_START))
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	else
		event->hw.state = 0;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time (->commit_txn) as a whole.
	 */
	if (cpuhw->txn_flags & PERF_PMU_TXN_ADD)
		goto nocheck;

	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1, cpuhw->event))
		goto out;
	event->hw.config = cpuhw->events[n0];

nocheck:
	ebb_event_add(event);

	++cpuhw->n_events;
	++cpuhw->n_added;

	ret = 0;
 out:
	if (has_branch_stack(event)) {
		u64 bhrb_filter = -1;

		if (ppmu->bhrb_filter_map)
			bhrb_filter = ppmu->bhrb_filter_map(
				event->attr.branch_sample_type);

		if (bhrb_filter != -1) {
			cpuhw->bhrb_filter = bhrb_filter;
			power_pmu_bhrb_enable(event);
		}
	}

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}

/*
 * Remove an event from the PMU.
 */
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);

	cpuhw = this_cpu_ptr(&cpu_hw_events);
	for (i = 0; i < cpuhw->n_events; ++i) {
		if (event == cpuhw->event[i]) {
			while (++i < cpuhw->n_events) {
				cpuhw->event[i-1] = cpuhw->event[i];
				cpuhw->events[i-1] = cpuhw->events[i];
				cpuhw->flags[i-1] = cpuhw->flags[i];
			}
			--cpuhw->n_events;
			ppmu->disable_pmc(event->hw.idx - 1, &cpuhw->mmcr);
			if (event->hw.idx) {
				write_pmc(event->hw.idx, 0);
				event->hw.idx = 0;
			}
			perf_event_update_userpage(event);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (event == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_events == 0) {
		/* disable exceptions if no events are running */
		cpuhw->mmcr.mmcr0 &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	if (has_branch_stack(event))
		power_pmu_bhrb_disable(event);

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

/*
 * POWER-PMU does not support disabling individual counters, hence
 * program their cycle counter to their max value and ignore the interrupts.
 */
static void power_pmu_start(struct perf_event *event, int ef_flags)
{
	unsigned long flags;
	s64 left;
	unsigned long val;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (!(event->hw.state & PERF_HES_STOPPED))
		return;

	if (ef_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	event->hw.state = 0;
	left = local64_read(&event->hw.period_left);

	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;

	write_pmc(event->hw.idx, val);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

static void power_pmu_stop(struct perf_event *event, int ef_flags)
{
	unsigned long flags;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	write_pmc(event->hw.idx, 0);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

/*
 * Start group events scheduling transaction.
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time.
 *
 * We only support PERF_PMU_TXN_ADD transactions. Save the
 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
 * transactions.
 */
static void power_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	WARN_ON_ONCE(cpuhw->txn_flags);		/* txn already in flight */

	cpuhw->txn_flags = txn_flags;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	perf_pmu_disable(pmu);
	cpuhw->n_txn_start = cpuhw->n_events;
}

/*
 * Stop group events scheduling transaction.
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void power_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	unsigned int txn_flags;

	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */

	txn_flags = cpuhw->txn_flags;
	cpuhw->txn_flags = 0;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	perf_pmu_enable(pmu);
}

/*
 * Commit group events scheduling transaction.
 * Perform the group schedulability test as a whole.
 * Return 0 if success.
 */
static int power_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	long i, n;

	if (!ppmu)
		return -EAGAIN;

	cpuhw = this_cpu_ptr(&cpu_hw_events);
	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */

	if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
		cpuhw->txn_flags = 0;
		return 0;
	}

	n = cpuhw->n_events;
	if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n, cpuhw->event);
	if (i < 0)
		return -EAGAIN;

	for (i = cpuhw->n_txn_start; i < n; ++i)
		cpuhw->event[i]->hw.config = cpuhw->events[i];

	cpuhw->txn_flags = 0;
	perf_pmu_enable(pmu);
	return 0;
}

/*
 * Return 1 if we might be able to put event on a limited PMC,
 * or 0 if not.
 * An event can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (event->attr.exclude_user
	    || event->attr.exclude_kernel
	    || event->attr.exclude_hv
	    || event->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event_id isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}

/*
 * Find an alternative event_id that goes on a normal PMC, if possible,
 * and return the event_id code, or 0 if there is no such alternative.
 * (Note: event_id code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}

/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	u64 ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}
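
/*
 * For example, config = 0x10000 decodes as type 0 (L1D), op 0 (read),
 * result 1 (miss), i.e. the L1 data cache read-miss event.
 */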

static bool is_event_blacklisted(u64 ev)
{
	int i;

	for (i = 0; i < ppmu->n_blacklist_ev; i++) {
		if (ppmu->blacklist_ev[i] == ev)
			return true;
	}

	return false;
}

static int power_pmu_event_init(struct perf_event *event)
{
	u64 ev;
	unsigned long flags, irq_flags;
	struct perf_event *ctrs[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int cflags[MAX_HWEVENTS];
	int n;
	int err;
	struct cpu_hw_events *cpuhw;

	if (!ppmu)
		return -ENOENT;

	if (has_branch_stack(event)) {
		/* PMU has BHRB enabled */
		if (!(ppmu->flags & PPMU_ARCH_207S))
			return -EOPNOTSUPP;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = event->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return -EOPNOTSUPP;

		if (ppmu->blacklist_ev && is_event_blacklisted(ev))
			return -EINVAL;
		ev = ppmu->generic_events[ev];
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(event->attr.config, &ev);
		if (err)
			return err;

		if (ppmu->blacklist_ev && is_event_blacklisted(ev))
			return -EINVAL;
		break;
	case PERF_TYPE_RAW:
		ev = event->attr.config;

		if (ppmu->blacklist_ev && is_event_blacklisted(ev))
			return -EINVAL;
		break;
	default:
		return -ENOENT;
	}

	/*
	 * PMU config registers have fields that are
	 * reserved and some specific values for bit fields are reserved.
	 * For ex., MMCRA[61:62] is Random Sampling Mode (SM)
	 * and value of 0b11 to this field is reserved.
	 * Check for invalid values in attr.config.
	 */
	if (ppmu->check_attr_config &&
	    ppmu->check_attr_config(event))
		return -EINVAL;

	event->hw.config_base = ev;
	event->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		event->attr.exclude_hv = 0;

	/*
	 * If this is a per-task event, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 * XXX we should check if the task is an idle task.
	 */
	flags = 0;
	if (event->attach_state & PERF_ATTACH_TASK)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited events, check whether this
	 * event_id could go on a limited event.
	 */
	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
		if (can_go_on_limited_pmc(event, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event_id is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return -EINVAL;
		}
	}

	/* Extra checks for EBB */
	err = ebb_event_check(event);
	if (err)
		return err;

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware events in the group.  We assume the event
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = ev;
	ctrs[n] = event;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return -EINVAL;

	local_irq_save(irq_flags);
	cpuhw = this_cpu_ptr(&cpu_hw_events);

	err = power_check_constraints(cpuhw, events, cflags, n + 1, ctrs);

	if (has_branch_stack(event)) {
		u64 bhrb_filter = -1;

		/*
		 * Currently no PMU supports having multiple branch filters
		 * at the same time. Branch filters are set via MMCRA IFM[32:33]
		 * bits for Power8 and above. Return EOPNOTSUPP when multiple
		 * branch filters are requested in the event attr.
		 *
		 * When opening an event via perf_event_open(), branch_sample_type
		 * gets adjusted in perf_copy_attr(). The kernel will automatically
		 * adjust the branch_sample_type based on the event modifier
		 * settings to include PERF_SAMPLE_BRANCH_PLM_ALL. Hence drop
		 * the check for PERF_SAMPLE_BRANCH_PLM_ALL.
		 */
		if (hweight64(event->attr.branch_sample_type & ~PERF_SAMPLE_BRANCH_PLM_ALL) > 1) {
			local_irq_restore(irq_flags);
			return -EOPNOTSUPP;
		}

		if (ppmu->bhrb_filter_map)
			bhrb_filter = ppmu->bhrb_filter_map(
					event->attr.branch_sample_type);

		if (bhrb_filter == -1) {
			local_irq_restore(irq_flags);
			return -EOPNOTSUPP;
		}
		cpuhw->bhrb_filter = bhrb_filter;
	}

	local_irq_restore(irq_flags);
	if (err)
		return -EINVAL;

	event->hw.config = events[n];
	event->hw.event_base = cflags[n];
	event->hw.last_period = event->hw.sample_period;
	local64_set(&event->hw.period_left, event->hw.last_period);

	/*
	 * For EBB events we just context switch the PMC value, we don't do any
	 * of the sample_period logic. We use hw.prev_count for this.
	 */
	if (is_ebb_event(event))
		local64_set(&event->hw.prev_count, 0);

	/*
	 * See if we need to reserve the PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware(perf_event_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	event->destroy = hw_perf_event_destroy;

	return err;
}

static int power_pmu_event_idx(struct perf_event *event)
{
	return event->hw.idx;
}

ssize_t power_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}
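
/*
 * power_events_sysfs_show() is used as the ->show routine for the event
 * attributes that the individual PMU drivers export under sysfs.
 */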

static struct pmu power_pmu = {
	.pmu_enable	= power_pmu_enable,
	.pmu_disable	= power_pmu_disable,
	.event_init	= power_pmu_event_init,
	.add		= power_pmu_add,
	.del		= power_pmu_del,
	.start		= power_pmu_start,
	.stop		= power_pmu_stop,
	.read		= power_pmu_read,
	.start_txn	= power_pmu_start_txn,
	.cancel_txn	= power_pmu_cancel_txn,
	.commit_txn	= power_pmu_commit_txn,
	.event_idx	= power_pmu_event_idx,
	.sched_task	= power_pmu_sched_task,
};

#define PERF_SAMPLE_ADDR_TYPE  (PERF_SAMPLE_ADDR |		\
				PERF_SAMPLE_PHYS_ADDR |		\
				PERF_SAMPLE_DATA_PAGE_SIZE)
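
/*
 * Any of the sample types above needs the sampled data address, i.e. an
 * SDAR read via perf_get_data_addr() in record_and_restart() below.
 */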

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_event *event, unsigned long val,
			       struct pt_regs *regs)
{
	u64 period = event->hw.sample_period;
	s64 prev, delta, left;
	int record = 0;

	if (event->hw.state & PERF_HES_STOPPED) {
		write_pmc(event->hw.idx, 0);
		return;
	}

	/* we don't have to worry about interrupts here */
	prev = local64_read(&event->hw.prev_count);
	delta = check_and_compute_delta(prev, val);
	local64_add(delta, &event->count);

	/*
	 * See if the total period for this event has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = local64_read(&event->hw.period_left) - delta;
	if (delta == 0)
		left++;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;

			/*
			 * If address is not requested in the sample via
			 * PERF_SAMPLE_IP, just record that sample irrespective
			 * of SIAR valid check.
			 */
			if (event->attr.sample_type & PERF_SAMPLE_IP)
				record = siar_valid(regs);
			else
				record = 1;

			event->hw.last_period = event->hw.sample_period;
		}
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
	}

	write_pmc(event->hw.idx, val);
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);

	/*
	 * Due to hardware limitation, sometimes SIAR could sample a kernel
	 * address even when freeze on supervisor state (kernel) is set in
	 * MMCR2. Check attr.exclude_kernel and address to drop the sample in
	 * these cases.
	 */
	if (event->attr.exclude_kernel &&
	    (event->attr.sample_type & PERF_SAMPLE_IP) &&
	    is_kernel_addr(mfspr(SPRN_SIAR)))
		record = 0;

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data;

		perf_sample_data_init(&data, ~0ULL, event->hw.last_period);

		if (event->attr.sample_type & PERF_SAMPLE_ADDR_TYPE)
			perf_get_data_addr(event, regs, &data.addr);

		if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
			struct cpu_hw_events *cpuhw;
			cpuhw = this_cpu_ptr(&cpu_hw_events);
			power_pmu_bhrb_read(event, cpuhw);
			perf_sample_save_brstack(&data, event, &cpuhw->bhrb_stack, NULL);
		}

		if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
		    ppmu->get_mem_data_src) {
			ppmu->get_mem_data_src(&data.data_src, ppmu->flags, regs);
			data.sample_flags |= PERF_SAMPLE_DATA_SRC;
		}

		if (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE &&
		    ppmu->get_mem_weight) {
			ppmu->get_mem_weight(&data.weight.full, event->attr.sample_type);
			data.sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
		}
		if (perf_event_overflow(event, &data, regs))
			power_pmu_stop(event, 0);
	} else if (period) {
		/* Account for interrupt in case of invalid SIAR */
		if (perf_event_account_interrupt(event))
			power_pmu_stop(event, 0);
	}
}

/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event_id.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	u32 flags = perf_get_misc_flags(regs);

	if (flags)
		return flags;
	return user_mode(regs) ? PERF_RECORD_MISC_USER :
		PERF_RECORD_MISC_KERNEL;
}

/*
 * Called from generic code to get the instruction pointer
 * for an event_id.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	unsigned long siar = mfspr(SPRN_SIAR);

	if (regs_use_siar(regs) && siar_valid(regs) && siar)
		return siar + perf_ip_adjust(regs);
	else
		return regs->nip;
}

static bool pmc_overflow_power7(unsigned long val)
{
	/*
	 * Events on POWER7 can roll back if a speculative event doesn't
	 * eventually complete. Unfortunately in some rare cases they will
	 * raise a performance monitor exception. We need to catch this to
	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
	 * cycles from overflow.
	 *
	 * We only do this if the first pass fails to find any overflowing
	 * PMCs because a user might set a period of less than 256 and we
	 * don't want to mistakenly reset them.
	 */
	if ((0x80000000 - val) <= 256)
		return true;

	return false;
}

static bool pmc_overflow(unsigned long val)
{
	if ((int)val < 0)
		return true;

	return false;
}

/*
 * Performance monitor interrupt stuff
 */
static void __perf_event_interrupt(struct pt_regs *regs)
{
	int i, j;
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	struct perf_event *event;
	int found, active;

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	perf_read_regs(regs);

	/* Read all the PMCs since we'll need them a bunch of times */
	for (i = 0; i < ppmu->n_counter; ++i)
		cpuhw->pmcs[i] = read_pmc(i + 1);

	/* Try to find what caused the IRQ */
	found = 0;
	for (i = 0; i < ppmu->n_counter; ++i) {
		if (!pmc_overflow(cpuhw->pmcs[i]))
			continue;
		if (is_limited_pmc(i + 1))
			continue; /* these won't generate IRQs */
		/*
		 * We've found one that's overflowed.  For active
		 * counters we need to log this.  For inactive
		 * counters, we need to reset it anyway
		 */
		found = 1;
		active = 0;
		for (j = 0; j < cpuhw->n_events; ++j) {
			event = cpuhw->event[j];
			if (event->hw.idx == (i + 1)) {
				active = 1;
				record_and_restart(event, cpuhw->pmcs[i], regs);
				break;
			}
		}

		/*
		 * Clear PACA_IRQ_PMI in case it was set by
		 * set_pmi_irq_pending() when PMU was enabled
		 * after accounting for interrupts.
		 */
		clear_pmi_irq_pending();

		if (!active)
			/* reset non active counters that have overflowed */
			write_pmc(i + 1, 0);
	}
	if (!found && pvr_version_is(PVR_POWER7)) {
		/* check active counters for special buggy p7 overflow */
		for (i = 0; i < cpuhw->n_events; ++i) {
			event = cpuhw->event[i];
			if (!event->hw.idx || is_limited_pmc(event->hw.idx))
				continue;
			if (pmc_overflow_power7(cpuhw->pmcs[event->hw.idx - 1])) {
				/* event has overflowed in a buggy way */
				found = 1;
				record_and_restart(event,
						   cpuhw->pmcs[event->hw.idx - 1],
						   regs);
			}
		}
	}

	/*
	 * During system wide profiling or while a specific CPU is monitored for
	 * an event, some corner cases could cause a PMC to overflow in the idle
	 * path, which triggers a PMI after waking up from idle. Since counter
	 * values are _not_ saved/restored in the idle path, this can lead to
	 * the "Can't find PMC" message below.
	 */
	if (unlikely(!found) && !arch_irq_disabled_regs(regs))
		printk_ratelimited(KERN_WARNING "Can't find PMC that caused IRQ\n");

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the events frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr.mmcr0);

	/* Clear the cpuhw->pmcs */
	memset(&cpuhw->pmcs, 0, sizeof(cpuhw->pmcs));
}

static void perf_event_interrupt(struct pt_regs *regs)
{
	u64 start_clock = sched_clock();

	__perf_event_interrupt(regs);
	perf_sample_event_took(sched_clock() - start_clock);
}

static int power_pmu_prepare_cpu(unsigned int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (ppmu) {
		memset(cpuhw, 0, sizeof(*cpuhw));
		cpuhw->mmcr.mmcr0 = MMCR0_FC;
	}
	return 0;
}
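
/*
 * power_pmu_prepare_cpu() is the CPU hotplug "prepare" callback
 * registered in register_power_pmu(); each CPU starts out with its
 * counters frozen (MMCR0_FC).
 */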

static ssize_t pmu_name_show(struct device *cdev,
			     struct device_attribute *attr,
			     char *buf)
{
	if (ppmu)
		return sysfs_emit(buf, "%s", ppmu->name);

	return 0;
}

static DEVICE_ATTR_RO(pmu_name);

static struct attribute *pmu_caps_attrs[] = {
	&dev_attr_pmu_name.attr,
	NULL
};

static const struct attribute_group pmu_caps_group = {
	.name  = "caps",
	.attrs = pmu_caps_attrs,
};

static const struct attribute_group *pmu_caps_groups[] = {
	&pmu_caps_group,
	NULL,
};

int __init register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	power_pmu.attr_groups = ppmu->attr_groups;

	if (ppmu->flags & PPMU_ARCH_207S)
		power_pmu.attr_update = pmu_caps_groups;

	power_pmu.capabilities |= (ppmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS);

#ifdef MSR_HV
	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_events_kernel = MMCR0_FCHV;
#endif /* CONFIG_PPC64 */

	perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
	cpuhp_setup_state(CPUHP_PERF_POWER, "perf/powerpc:prepare",
			  power_pmu_prepare_cpu, NULL);
	return 0;
}

#ifdef CONFIG_PPC64
static bool pmu_override = false;
static unsigned long pmu_override_val;
static void do_pmu_override(void *data)
{
	ppc_set_pmu_inuse(1);
	if (pmu_override_val)
		mtspr(SPRN_MMCR1, pmu_override_val);
	mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC);
}

static int __init init_ppc64_pmu(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE) && pmu_override) {
		pr_warn("disabling perf due to pmu_override= command line option.\n");
		on_each_cpu(do_pmu_override, NULL, 1);
		return 0;
	}

	/* run through all the pmu drivers one at a time */
	if (!init_power5_pmu())
		return 0;
	else if (!init_power5p_pmu())
		return 0;
	else if (!init_power6_pmu())
		return 0;
	else if (!init_power7_pmu())
		return 0;
	else if (!init_power8_pmu())
		return 0;
	else if (!init_power9_pmu())
		return 0;
	else if (!init_power10_pmu())
		return 0;
	else if (!init_power11_pmu())
		return 0;
	else if (!init_ppc970_pmu())
		return 0;
	else
		return init_generic_compat_pmu();
}
early_initcall(init_ppc64_pmu);
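
/*
 * "pmu_override=" on the kernel command line (HV mode only) keeps
 * init_ppc64_pmu() from registering a PMU driver; the optional numeric
 * value is written to MMCR1 on each CPU and the counters are unfrozen,
 * leaving the PMU to be driven by raw SPR access instead of perf.
 */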
static int __init pmu_setup(char *str)
{
	unsigned long val;

	if (!early_cpu_has_feature(CPU_FTR_HVMODE))
		return 0;

	pmu_override = true;

	if (kstrtoul(str, 0, &val))
		val = 0;

	pmu_override_val = val;

	return 1;
}
__setup("pmu_override=", pmu_setup);
#endif /* CONFIG_PPC64 */