// SPDX-License-Identifier: GPL-2.0
/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <[email protected]>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 *  by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 *  a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 *  a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 *  counter and all 4 performance counters together can be reset separately.
 */

#ifdef CONFIG_CPU_V7

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
#define ARMV7_PERFCTR_PMNC_SW_INCR                      0x00
#define ARMV7_PERFCTR_L1_ICACHE_REFILL                  0x01
#define ARMV7_PERFCTR_ITLB_REFILL                       0x02
#define ARMV7_PERFCTR_L1_DCACHE_REFILL                  0x03
#define ARMV7_PERFCTR_L1_DCACHE_ACCESS                  0x04
#define ARMV7_PERFCTR_DTLB_REFILL                       0x05
#define ARMV7_PERFCTR_MEM_READ                          0x06
#define ARMV7_PERFCTR_MEM_WRITE                         0x07
#define ARMV7_PERFCTR_INSTR_EXECUTED                    0x08
#define ARMV7_PERFCTR_EXC_TAKEN                         0x09
#define ARMV7_PERFCTR_EXC_EXECUTED                      0x0A
#define ARMV7_PERFCTR_CID_WRITE                         0x0B

/*
 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
 * It counts:
 *  - all (taken) branch instructions,
 *  - instructions that explicitly write the PC,
 *  - exception generating instructions.
 */
#define ARMV7_PERFCTR_PC_WRITE                          0x0C
#define ARMV7_PERFCTR_PC_IMM_BRANCH                     0x0D
#define ARMV7_PERFCTR_PC_PROC_RETURN                    0x0E
#define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS              0x0F
#define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED                0x10
#define ARMV7_PERFCTR_CLOCK_CYCLES                      0x11
#define ARMV7_PERFCTR_PC_BRANCH_PRED                    0x12
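
/*
 * Illustrative userspace usage (an editorial aside, not part of the
 * original file): the encodings above can be requested directly as raw
 * perf events, e.g.
 *
 *	perf stat -e r08 ...	# ARMV7_PERFCTR_INSTR_EXECUTED
 *	perf stat -e r03 ...	# ARMV7_PERFCTR_L1_DCACHE_REFILL
 */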

/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
#define ARMV7_PERFCTR_MEM_ACCESS                        0x13
#define ARMV7_PERFCTR_L1_ICACHE_ACCESS                  0x14
#define ARMV7_PERFCTR_L1_DCACHE_WB                      0x15
#define ARMV7_PERFCTR_L2_CACHE_ACCESS                   0x16
#define ARMV7_PERFCTR_L2_CACHE_REFILL                   0x17
#define ARMV7_PERFCTR_L2_CACHE_WB                       0x18
#define ARMV7_PERFCTR_BUS_ACCESS                        0x19
#define ARMV7_PERFCTR_MEM_ERROR                         0x1A
#define ARMV7_PERFCTR_INSTR_SPEC                        0x1B
#define ARMV7_PERFCTR_TTBR_WRITE                        0x1C
#define ARMV7_PERFCTR_BUS_CYCLES                        0x1D

#define ARMV7_PERFCTR_CPU_CYCLES                        0xFF

/* ARMv7 Cortex-A8 specific event types */
#define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS                0x43
#define ARMV7_A8_PERFCTR_L2_CACHE_REFILL                0x44
#define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS               0x50
#define ARMV7_A8_PERFCTR_STALL_ISIDE                    0x56

/* ARMv7 Cortex-A9 specific event types */
#define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME              0x68
#define ARMV7_A9_PERFCTR_STALL_ICACHE                   0x60
#define ARMV7_A9_PERFCTR_STALL_DISPATCH                 0x66

/* ARMv7 Cortex-A5 specific event types */
#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL              0xc2
#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP         0xc3

/* ARMv7 Cortex-A15 specific event types */
#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ         0x40
#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE        0x41
#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ         0x42
#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE        0x43

#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ           0x4C
#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE          0x4D

#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ          0x50
#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE         0x51
#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ          0x52
#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE         0x53

#define ARMV7_A15_PERFCTR_PC_WRITE_SPEC                 0x76

/* ARMv7 Cortex-A12 specific event types */
#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ         0x40
#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE        0x41

#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ          0x50
#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE         0x51

#define ARMV7_A12_PERFCTR_PC_WRITE_SPEC                 0x76

#define ARMV7_A12_PERFCTR_PF_TLB_REFILL                 0xe7

/* ARMv7 Krait specific event types */
#define KRAIT_PMRESR0_GROUP0                            0xcc
#define KRAIT_PMRESR1_GROUP0                            0xd0
#define KRAIT_PMRESR2_GROUP0                            0xd4
#define KRAIT_VPMRESR0_GROUP0                           0xd8

#define KRAIT_PERFCTR_L1_ICACHE_ACCESS                  0x10011
#define KRAIT_PERFCTR_L1_ICACHE_MISS                    0x10010

#define KRAIT_PERFCTR_L1_ITLB_ACCESS                    0x12222
#define KRAIT_PERFCTR_L1_DTLB_ACCESS                    0x12210

/* ARMv7 Scorpion specific event types */
#define SCORPION_LPM0_GROUP0                            0x4c
#define SCORPION_LPM1_GROUP0                            0x50
#define SCORPION_LPM2_GROUP0                            0x54
#define SCORPION_L2LPM_GROUP0                           0x58
#define SCORPION_VLPM_GROUP0                            0x5c

#define SCORPION_ICACHE_ACCESS                          0x10053
#define SCORPION_ICACHE_MISS                            0x10052

#define SCORPION_DTLB_ACCESS                            0x12013
#define SCORPION_DTLB_MISS                              0x12012

#define SCORPION_ITLB_MISS                              0x12021

/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
};
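
/*
 * A rough sketch of how the shared arm_pmu core consumes a table like
 * the one above (see armpmu_map_event() in the common code): a generic
 * event such as PERF_COUNT_HW_CPU_CYCLES indexes armv7_a8_perf_map[]
 * and yields the hardware encoding (here ARMV7_PERFCTR_CPU_CYCLES,
 * 0xFF); slots left at HW_OP_UNSUPPORTED by PERF_MAP_ALL_UNSUPPORTED
 * make the lookup fail, so unsupported events are rejected at open
 * time.
 */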

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                          [PERF_COUNT_HW_CACHE_OP_MAX]
                                          [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        /*
         * The performance counters don't differentiate between read and write
         * accesses/misses so this isn't strictly correct, but it's the best we
         * can do. Writes and reads get combined.
         */
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,

        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,

        [C(LL)][C(OP_READ)][C(RESULT_ACCESS)]   = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
        [C(LL)][C(OP_READ)][C(RESULT_MISS)]     = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
        [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]  = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
        [C(LL)][C(OP_WRITE)][C(RESULT_MISS)]    = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,

        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_DTLB_REFILL,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_DTLB_REFILL,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV7_A9_PERFCTR_STALL_DISPATCH,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                          [PERF_COUNT_HW_CACHE_OP_MAX]
                                          [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        /*
         * The performance counters don't differentiate between read and write
         * accesses/misses so this isn't strictly correct, but it's the best we
         * can do. Writes and reads get combined.
         */
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,

        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,

        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_DTLB_REFILL,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_DTLB_REFILL,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)]      = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
        [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)]        = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,

        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,
        /*
         * The prefetch counters don't differentiate between the I side and the
         * D side.
         */
        [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)]      = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
        [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)]        = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,

        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_DTLB_REFILL,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_DTLB_REFILL,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]              = ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,

        /*
         * Not all performance counters differentiate between read and write
         * accesses/misses so we're not always strictly correct, but it's the
         * best we can do. Writes and reads get combined in these cases.
         */
        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,

        [C(LL)][C(OP_READ)][C(RESULT_ACCESS)]   = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
        [C(LL)][C(OP_READ)][C(RESULT_MISS)]     = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
        [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]  = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
        [C(LL)][C(OP_WRITE)][C(RESULT_MISS)]    = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,

        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A7 HW events mapping
 */
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]              = ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        /*
         * The performance counters don't differentiate between read and write
         * accesses/misses so this isn't strictly correct, but it's the best we
         * can do. Writes and reads get combined.
         */
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,

        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,

        [C(LL)][C(OP_READ)][C(RESULT_ACCESS)]   = ARMV7_PERFCTR_L2_CACHE_ACCESS,
        [C(LL)][C(OP_READ)][C(RESULT_MISS)]     = ARMV7_PERFCTR_L2_CACHE_REFILL,
        [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L2_CACHE_ACCESS,
        [C(LL)][C(OP_WRITE)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L2_CACHE_REFILL,

        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_DTLB_REFILL,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_DTLB_REFILL,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Cortex-A12 HW events mapping
 */
static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]              = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [PERF_COUNT_HW_CACHE_MISSES]            = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
        [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]              = ARMV7_PERFCTR_BUS_CYCLES,
};

static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,

        /*
         * Not all performance counters differentiate between read and write
         * accesses/misses so we're not always strictly correct, but it's the
         * best we can do. Writes and reads get combined in these cases.
         */
        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_ICACHE_REFILL,

        [C(LL)][C(OP_READ)][C(RESULT_ACCESS)]   = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
        [C(LL)][C(OP_READ)][C(RESULT_MISS)]     = ARMV7_PERFCTR_L2_CACHE_REFILL,
        [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]  = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
        [C(LL)][C(OP_WRITE)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L2_CACHE_REFILL,

        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_DTLB_REFILL,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_DTLB_REFILL,
        [C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)]       = ARMV7_A12_PERFCTR_PF_TLB_REFILL,

        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)]   = ARMV7_PERFCTR_ITLB_REFILL,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]  = ARMV7_PERFCTR_ITLB_REFILL,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Krait HW events mapping
 */
static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                          [PERF_COUNT_HW_CACHE_OP_MAX]
                                          [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,

        /*
         * The performance counters don't differentiate between read and write
         * accesses/misses so this isn't strictly correct, but it's the best we
         * can do. Writes and reads get combined.
         */
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_L1_DCACHE_REFILL,

        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]  = KRAIT_PERFCTR_L1_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)]    = KRAIT_PERFCTR_L1_ICACHE_MISS,

        [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)]        = KRAIT_PERFCTR_L1_DTLB_ACCESS,

        [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)]        = KRAIT_PERFCTR_L1_ITLB_ACCESS,

        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]  = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)]    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]   = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

/*
 * Scorpion HW events mapping
 */
static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
        PERF_MAP_ALL_UNSUPPORTED,
        [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
        [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                            [PERF_COUNT_HW_CACHE_OP_MAX]
                                            [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        PERF_CACHE_MAP_ALL_UNSUPPORTED,
        /*
         * The performance counters don't differentiate between read and write
         * accesses/misses so this isn't strictly correct, but it's the best we
         * can do. Writes and reads get combined.
         */
        [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
        [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
        [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
        [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
        /*
         * Only ITLB misses and DTLB refills are supported. If users want
         * the DTLB refill misses, a raw counter must be used.
         */
        [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
        [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
        [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
        [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
        [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
        [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
        [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};

PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *armv7_pmu_format_attrs[] = {
        &format_attr_event.attr,
        NULL,
};

static struct attribute_group armv7_pmu_format_attr_group = {
        .name = "format",
        .attrs = armv7_pmu_format_attrs,
};

#define ARMV7_EVENT_ATTR_RESOLVE(m) #m
#define ARMV7_EVENT_ATTR(name, config) \
        PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \
                              "event=" ARMV7_EVENT_ATTR_RESOLVE(config))
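
/*
 * Note on the two-level macro above: because ARMV7_EVENT_ATTR() passes
 * "config" through ARMV7_EVENT_ATTR_RESOLVE() rather than stringizing it
 * directly, the argument is macro-expanded first. For example,
 * ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR) below yields the
 * sysfs string "event=0x00" rather than the literal macro name.
 */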

ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR);
ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL);
ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL);
ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL);
ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS);
ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL);
ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ);
ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE);
ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED);
ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN);
ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED);
ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE);
ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE);
ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH);
ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN);
ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS);
ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED);
ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES);
ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED);

static struct attribute *armv7_pmuv1_event_attrs[] = {
        &armv7_event_attr_sw_incr.attr.attr,
        &armv7_event_attr_l1i_cache_refill.attr.attr,
        &armv7_event_attr_l1i_tlb_refill.attr.attr,
        &armv7_event_attr_l1d_cache_refill.attr.attr,
        &armv7_event_attr_l1d_cache.attr.attr,
        &armv7_event_attr_l1d_tlb_refill.attr.attr,
        &armv7_event_attr_ld_retired.attr.attr,
        &armv7_event_attr_st_retired.attr.attr,
        &armv7_event_attr_inst_retired.attr.attr,
        &armv7_event_attr_exc_taken.attr.attr,
        &armv7_event_attr_exc_return.attr.attr,
        &armv7_event_attr_cid_write_retired.attr.attr,
        &armv7_event_attr_pc_write_retired.attr.attr,
        &armv7_event_attr_br_immed_retired.attr.attr,
        &armv7_event_attr_br_return_retired.attr.attr,
        &armv7_event_attr_unaligned_ldst_retired.attr.attr,
        &armv7_event_attr_br_mis_pred.attr.attr,
        &armv7_event_attr_cpu_cycles.attr.attr,
        &armv7_event_attr_br_pred.attr.attr,
        NULL,
};

static struct attribute_group armv7_pmuv1_events_attr_group = {
        .name = "events",
        .attrs = armv7_pmuv1_event_attrs,
};

ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS);
ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS);
ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB);
ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS);
ARMV7_EVENT_ATTR(l2d_cache_refill, ARMV7_PERFCTR_L2_CACHE_REFILL);
ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB);
ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS);
ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR);
ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC);
ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE);
ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES);

static struct attribute *armv7_pmuv2_event_attrs[] = {
        &armv7_event_attr_sw_incr.attr.attr,
        &armv7_event_attr_l1i_cache_refill.attr.attr,
        &armv7_event_attr_l1i_tlb_refill.attr.attr,
        &armv7_event_attr_l1d_cache_refill.attr.attr,
        &armv7_event_attr_l1d_cache.attr.attr,
        &armv7_event_attr_l1d_tlb_refill.attr.attr,
        &armv7_event_attr_ld_retired.attr.attr,
        &armv7_event_attr_st_retired.attr.attr,
        &armv7_event_attr_inst_retired.attr.attr,
        &armv7_event_attr_exc_taken.attr.attr,
        &armv7_event_attr_exc_return.attr.attr,
        &armv7_event_attr_cid_write_retired.attr.attr,
        &armv7_event_attr_pc_write_retired.attr.attr,
        &armv7_event_attr_br_immed_retired.attr.attr,
        &armv7_event_attr_br_return_retired.attr.attr,
        &armv7_event_attr_unaligned_ldst_retired.attr.attr,
        &armv7_event_attr_br_mis_pred.attr.attr,
        &armv7_event_attr_cpu_cycles.attr.attr,
        &armv7_event_attr_br_pred.attr.attr,
        &armv7_event_attr_mem_access.attr.attr,
        &armv7_event_attr_l1i_cache.attr.attr,
        &armv7_event_attr_l1d_cache_wb.attr.attr,
        &armv7_event_attr_l2d_cache.attr.attr,
        &armv7_event_attr_l2d_cache_refill.attr.attr,
        &armv7_event_attr_l2d_cache_wb.attr.attr,
        &armv7_event_attr_bus_access.attr.attr,
        &armv7_event_attr_memory_error.attr.attr,
        &armv7_event_attr_inst_spec.attr.attr,
        &armv7_event_attr_ttbr_write_retired.attr.attr,
        &armv7_event_attr_bus_cycles.attr.attr,
        NULL,
};

static struct attribute_group armv7_pmuv2_events_attr_group = {
        .name = "events",
        .attrs = armv7_pmuv2_event_attrs,
};

/*
 * Perf Events' indices
 */
#define ARMV7_IDX_CYCLE_COUNTER 0
#define ARMV7_IDX_COUNTER0      1
#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
        (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define ARMV7_MAX_COUNTERS      32
#define ARMV7_COUNTER_MASK      (ARMV7_MAX_COUNTERS - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Perf Event to low level counters mapping
 */
#define ARMV7_IDX_TO_COUNTER(x) \
        (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
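
/*
 * Worked example: the first event counter (ARMV7_IDX_COUNTER0 == 1)
 * maps to hardware counter 0, while the cycle counter (idx 0) wraps to
 * 31 under the mask, which lines up with the position of the C (cycle
 * counter) bit in the enable/overflow registers written below.
 */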

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E            (1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P            (1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C            (1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D            (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X            (1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP           (1 << 5) /* Disable CCNT if non-invasive debug*/
#define ARMV7_PMNC_N_SHIFT      11       /* Number of counters supported */
#define ARMV7_PMNC_N_MASK       0x1f
#define ARMV7_PMNC_MASK         0x3f     /* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_MASK         0xffffffff      /* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK   ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define ARMV7_EVTYPE_MASK       0xc80000ff      /* Mask for writable bits */
#define ARMV7_EVTYPE_EVENT      0xff            /* Mask for EVENT bits */

/*
 * Event filters for PMUv2
 */
#define ARMV7_EXCLUDE_PL1       BIT(31)
#define ARMV7_EXCLUDE_USER      BIT(30)
#define ARMV7_INCLUDE_HYP       BIT(27)

/*
 * Secure debug enable reg
 */
#define ARMV7_SDER_SUNIDEN      BIT(1) /* Permit non-invasive debug */

static inline u32 armv7_pmnc_read(void)
{
        u32 val;
        asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
        return val;
}

static inline void armv7_pmnc_write(u32 val)
{
        val &= ARMV7_PMNC_MASK;
        isb();
        asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
        return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
        return idx >= ARMV7_IDX_CYCLE_COUNTER &&
                idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
        return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
}

static inline void armv7_pmnc_select_counter(int idx)
{
        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
        isb();
}

static inline u64 armv7pmu_read_counter(struct perf_event *event)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
        u32 value = 0;

        if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
                pr_err("CPU%u reading wrong counter %d\n",
                        smp_processor_id(), idx);
        } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
                asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
        } else {
                armv7_pmnc_select_counter(idx);
                asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
        }

        return value;
}

static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
{
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
                pr_err("CPU%u writing wrong counter %d\n",
                        smp_processor_id(), idx);
        } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
                asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value));
        } else {
                armv7_pmnc_select_counter(idx);
                asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value));
        }
}

static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
        armv7_pmnc_select_counter(idx);
        val &= ARMV7_EVTYPE_MASK;
        asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
}

static inline void armv7_pmnc_enable_counter(int idx)
{
        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_disable_counter(int idx)
{
        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_enable_intens(int idx)
{
        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_disable_intens(int idx)
{
        u32 counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
        isb();
        /* Clear the overflow flag in case an interrupt is pending. */
        asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
        isb();
}

static inline u32 armv7_pmnc_getreset_flags(void)
{
        u32 val;

        /* Read */
        asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

        /* Write to clear flags */
        val &= ARMV7_FLAG_MASK;
        asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

        return val;
}

#ifdef DEBUG
static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
{
        u32 val;
        unsigned int cnt;

        pr_info("PMNC registers dump:\n");

        asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
        pr_info("PMNC  =0x%08x\n", val);

        asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
        pr_info("CNTENS=0x%08x\n", val);

        asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
        pr_info("INTENS=0x%08x\n", val);

        asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
        pr_info("FLAGS =0x%08x\n", val);

        asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
        pr_info("SELECT=0x%08x\n", val);

        asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
        pr_info("CCNT  =0x%08x\n", val);

        for (cnt = ARMV7_IDX_COUNTER0;
                        cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
                armv7_pmnc_select_counter(cnt);
                asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
                pr_info("CNT[%d] count =0x%08x\n",
                        ARMV7_IDX_TO_COUNTER(cnt), val);
                asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
                pr_info("CNT[%d] evtsel=0x%08x\n",
                        ARMV7_IDX_TO_COUNTER(cnt), val);
        }
}
#endif

static void armv7pmu_enable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        int idx = hwc->idx;

        if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
                pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
                        smp_processor_id(), idx);
                return;
        }

        /*
         * Enable counter and interrupt, and set the counter to count
         * the event that we're interested in.
         */

        /*
         * Disable counter
         */
        armv7_pmnc_disable_counter(idx);

        /*
         * Set event (if destined for PMNx counters)
         * We only need to set the event for the cycle counter if we
         * have the ability to perform event filtering.
         */
        if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
                armv7_pmnc_write_evtsel(idx, hwc->config_base);

        /*
         * Enable interrupt for this counter
         */
        armv7_pmnc_enable_intens(idx);

        /*
         * Enable counter
         */
        armv7_pmnc_enable_counter(idx);
}

static void armv7pmu_disable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        int idx = hwc->idx;

        if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
                pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
                        smp_processor_id(), idx);
                return;
        }

        /*
         * Disable counter and interrupt
         */

        /*
         * Disable counter
         */
        armv7_pmnc_disable_counter(idx);

        /*
         * Disable interrupt for this counter
         */
        armv7_pmnc_disable_intens(idx);
}

static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
        u32 pmnc;
        struct perf_sample_data data;
        struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
        struct pt_regs *regs;
        int idx;

        /*
         * Get and reset the IRQ flags
         */
        pmnc = armv7_pmnc_getreset_flags();

        /*
         * Did an overflow occur?
         */
        if (!armv7_pmnc_has_overflowed(pmnc))
                return IRQ_NONE;

        /*
         * Handle the counter(s) overflow(s)
         */
        regs = get_irq_regs();

        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                /* Ignore if we don't have an event. */
                if (!event)
                        continue;

                /*
                 * We have a single interrupt for all counters. Check that
                 * each counter has overflowed before we process it.
                 */
                if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event);
                perf_sample_data_init(&data, 0, hwc->last_period);
                if (!armpmu_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        cpu_pmu->disable(event);
        }

        /*
         * Handle the pending perf events.
         *
         * Note: this call *must* be run with interrupts disabled. For
         * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        irq_work_run();

        return IRQ_HANDLED;
}

static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
        /* Enable all counters */
        armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
}

static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
        /* Disable all counters */
        armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
}

static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                  struct perf_event *event)
{
        int idx;
        struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;

        /* Always place a cycle-count event on the cycle counter. */
        if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
                if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;

                return ARMV7_IDX_CYCLE_COUNTER;
        }

        /*
         * For anything other than a cycle count, try to use one of the
         * event counters.
         */
        for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
                if (!test_and_set_bit(idx, cpuc->used_mask))
                        return idx;
        }

        /* The counters are all in use. */
        return -EAGAIN;
}

static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,
                                     struct perf_event *event)
{
        clear_bit(event->hw.idx, cpuc->used_mask);
}

/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
                                     struct perf_event_attr *attr)
{
        unsigned long config_base = 0;

        if (attr->exclude_idle) {
                pr_debug("ARM performance counters do not support mode exclusion\n");
                return -EOPNOTSUPP;
        }
        if (attr->exclude_user)
                config_base |= ARMV7_EXCLUDE_USER;
        if (attr->exclude_kernel)
                config_base |= ARMV7_EXCLUDE_PL1;
        if (!attr->exclude_hv)
                config_base |= ARMV7_INCLUDE_HYP;

        /*
         * Install the filter into config_base as this is used to
         * construct the event type.
         */
        event->config_base = config_base;

        return 0;
}
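
/*
 * Worked example (assuming a PMUv2 core such as Cortex-A15): counting
 * kernel-only cycles, e.g. "perf stat -e cycles:k", sets
 * attr->exclude_user, so config_base gains ARMV7_EXCLUDE_USER (bit 30).
 * armv7_pmnc_write_evtsel() later writes these filter bits, which fall
 * inside ARMV7_EVTYPE_MASK (0xc80000ff), into PMXEVTYPER together with
 * the event number.
 */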

static void armv7pmu_reset(void *info)
{
        struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
        u32 idx, nb_cnt = cpu_pmu->num_events, val;

        if (cpu_pmu->secure_access) {
                asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val));
                val |= ARMV7_SDER_SUNIDEN;
                asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val));
        }

        /* The counter and interrupt enable registers are unknown at reset. */
        for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
                armv7_pmnc_disable_counter(idx);
                armv7_pmnc_disable_intens(idx);
        }

        /* Initialize & Reset PMNC: C and P bits */
        armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}

static int armv7_a8_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv7_a8_perf_map,
                                &armv7_a8_perf_cache_map, 0xFF);
}

static int armv7_a9_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv7_a9_perf_map,
                                &armv7_a9_perf_cache_map, 0xFF);
}

static int armv7_a5_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv7_a5_perf_map,
                                &armv7_a5_perf_cache_map, 0xFF);
}

static int armv7_a15_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv7_a15_perf_map,
                                &armv7_a15_perf_cache_map, 0xFF);
}

static int armv7_a7_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv7_a7_perf_map,
                                &armv7_a7_perf_cache_map, 0xFF);
}

static int armv7_a12_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &armv7_a12_perf_map,
                                &armv7_a12_perf_cache_map, 0xFF);
}

static int krait_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &krait_perf_map,
                                &krait_perf_cache_map, 0xFFFFF);
}

static int krait_map_event_no_branch(struct perf_event *event)
{
        return armpmu_map_event(event, &krait_perf_map_no_branch,
                                &krait_perf_cache_map, 0xFFFFF);
}

static int scorpion_map_event(struct perf_event *event)
{
        return armpmu_map_event(event, &scorpion_perf_map,
                                &scorpion_perf_cache_map, 0xFFFFF);
}

static void armv7pmu_init(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->handle_irq     = armv7pmu_handle_irq;
        cpu_pmu->enable         = armv7pmu_enable_event;
        cpu_pmu->disable        = armv7pmu_disable_event;
        cpu_pmu->read_counter   = armv7pmu_read_counter;
        cpu_pmu->write_counter  = armv7pmu_write_counter;
        cpu_pmu->get_event_idx  = armv7pmu_get_event_idx;
        cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx;
        cpu_pmu->start          = armv7pmu_start;
        cpu_pmu->stop           = armv7pmu_stop;
        cpu_pmu->reset          = armv7pmu_reset;
}

static void armv7_read_num_pmnc_events(void *info)
{
        int *nb_cnt = info;

        /* Read the number of CNTx counters supported from PMNC */
        *nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

        /* Add the CPU cycles counter */
        *nb_cnt += 1;
}
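
/*
 * Example of the arithmetic above: a core whose PMNC.N field (bits
 * [15:11]) reads 6 has six event counters, so *nb_cnt ends up as 7
 * once the cycle counter is added.
 */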
1174
1175 static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
1176 {
1177         return smp_call_function_any(&arm_pmu->supported_cpus,
1178                                      armv7_read_num_pmnc_events,
1179                                      &arm_pmu->num_events, 1);
1180 }
1181
1182 static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
1183 {
1184         armv7pmu_init(cpu_pmu);
1185         cpu_pmu->name           = "armv7_cortex_a8";
1186         cpu_pmu->map_event      = armv7_a8_map_event;
1187         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1188                 &armv7_pmuv1_events_attr_group;
1189         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1190                 &armv7_pmu_format_attr_group;
1191         return armv7_probe_num_events(cpu_pmu);
1192 }
1193
1194 static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
1195 {
1196         armv7pmu_init(cpu_pmu);
1197         cpu_pmu->name           = "armv7_cortex_a9";
1198         cpu_pmu->map_event      = armv7_a9_map_event;
1199         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1200                 &armv7_pmuv1_events_attr_group;
1201         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1202                 &armv7_pmu_format_attr_group;
1203         return armv7_probe_num_events(cpu_pmu);
1204 }
1205
1206 static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
1207 {
1208         armv7pmu_init(cpu_pmu);
1209         cpu_pmu->name           = "armv7_cortex_a5";
1210         cpu_pmu->map_event      = armv7_a5_map_event;
1211         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1212                 &armv7_pmuv1_events_attr_group;
1213         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1214                 &armv7_pmu_format_attr_group;
1215         return armv7_probe_num_events(cpu_pmu);
1216 }
1217
1218 static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
1219 {
1220         armv7pmu_init(cpu_pmu);
1221         cpu_pmu->name           = "armv7_cortex_a15";
1222         cpu_pmu->map_event      = armv7_a15_map_event;
1223         cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1224         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1225                 &armv7_pmuv2_events_attr_group;
1226         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1227                 &armv7_pmu_format_attr_group;
1228         return armv7_probe_num_events(cpu_pmu);
1229 }
1230
1231 static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
1232 {
1233         armv7pmu_init(cpu_pmu);
1234         cpu_pmu->name           = "armv7_cortex_a7";
1235         cpu_pmu->map_event      = armv7_a7_map_event;
1236         cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1237         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1238                 &armv7_pmuv2_events_attr_group;
1239         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1240                 &armv7_pmu_format_attr_group;
1241         return armv7_probe_num_events(cpu_pmu);
1242 }
1243
1244 static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
1245 {
1246         armv7pmu_init(cpu_pmu);
1247         cpu_pmu->name           = "armv7_cortex_a12";
1248         cpu_pmu->map_event      = armv7_a12_map_event;
1249         cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1250         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1251                 &armv7_pmuv2_events_attr_group;
1252         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1253                 &armv7_pmu_format_attr_group;
1254         return armv7_probe_num_events(cpu_pmu);
1255 }
1256
1257 static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
1258 {
1259         int ret = armv7_a12_pmu_init(cpu_pmu);
1260         cpu_pmu->name = "armv7_cortex_a17";
1261         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1262                 &armv7_pmuv2_events_attr_group;
1263         cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1264                 &armv7_pmu_format_attr_group;
1265         return ret;
1266 }
1267
1268 /*
1269  * Krait Performance Monitor Region Event Selection Register (PMRESRn)
1270  *
1271  *            31   30     24     16     8      0
1272  *            +--------------------------------+
1273  *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1274  *            +--------------------------------+
1275  *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1276  *            +--------------------------------+
1277  *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1278  *            +--------------------------------+
1279  *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1280  *            +--------------------------------+
1281  *              EN | G=3  | G=2  | G=1  | G=0
1282  *
1283  *  Event Encoding:
1284  *
1285  *      hwc->config_base = 0xNRCCG
1286  *
1287  *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
1288  *      R  = region register
1289  *      CC = class of events the group G is choosing from
1290  *      G  = group or particular event
1291  *
1292  *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
1293  *
1294  *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1295  *  unit, etc.) while the event code (CC) corresponds to a particular class of
1296  *  events (interrupts for example). An event code is broken down into
1297  *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1298  *  example).
1299  */
1300
1301 #define KRAIT_EVENT             (1 << 16)
1302 #define VENUM_EVENT             (2 << 16)
1303 #define KRAIT_EVENT_MASK        (KRAIT_EVENT | VENUM_EVENT)
1304 #define PMRESRn_EN              BIT(31)
1305
1306 #define EVENT_REGION(event)     (((event) >> 12) & 0xf)         /* R */
1307 #define EVENT_GROUP(event)      ((event) & 0xf)                 /* G */
1308 #define EVENT_CODE(event)       (((event) >> 4) & 0xff)         /* CC */
1309 #define EVENT_VENUM(event)      (!!((event) & VENUM_EVENT))     /* N=2 */
1310 #define EVENT_CPU(event)        (!!((event) & KRAIT_EVENT))     /* N=1 */
1311
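/*
 * Editorial sketch (not part of the original source): decoding the
 * example event 0x12021 from the comment above with the EVENT_* macros.
 * The function name is hypothetical and exists only for illustration.
 */
static inline void krait_decode_example(void)
{
	u32 cfg = 0x12021;	/* N=1 (Krait CPU), R=2, CC=0x02, G=1 */

	pr_debug("region=%u code=0x%x group=%u venum=%d cpu=%d\n",
		 EVENT_REGION(cfg), EVENT_CODE(cfg), EVENT_GROUP(cfg),
		 EVENT_VENUM(cfg), EVENT_CPU(cfg));
	/* -> region=2 code=0x2 group=1 venum=0 cpu=1 (PMRESR2, group 1) */
}
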
1312 static u32 krait_read_pmresrn(int n)
1313 {
1314         u32 val;
1315
1316         switch (n) {
1317         case 0:
1318                 asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
1319                 break;
1320         case 1:
1321                 asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
1322                 break;
1323         case 2:
1324                 asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
1325                 break;
1326         default:
1327                 BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1328         }
1329
1330         return val;
1331 }
1332
1333 static void krait_write_pmresrn(int n, u32 val)
1334 {
1335         switch (n) {
1336         case 0:
1337                 asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
1338                 break;
1339         case 1:
1340                 asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
1341                 break;
1342         case 2:
1343                 asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
1344                 break;
1345         default:
1346                 BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1347         }
1348 }
1349
1350 static u32 venum_read_pmresr(void)
1351 {
1352         u32 val;
1353         asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
1354         return val;
1355 }
1356
1357 static void venum_write_pmresr(u32 val)
1358 {
1359         asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
1360 }
1361
1362 static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
1363 {
1364         u32 venum_new_val;
1365         u32 fp_new_val;
1366
1367         BUG_ON(preemptible());
1368         /* CPACR Enable CP10 and CP11 access */
1369         *venum_orig_val = get_copro_access();
1370         venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
1371         set_copro_access(venum_new_val);
1372
1373         /* Enable FPEXC */
1374         *fp_orig_val = fmrx(FPEXC);
1375         fp_new_val = *fp_orig_val | FPEXC_EN;
1376         fmxr(FPEXC, fp_new_val);
1377 }
1378
1379 static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
1380 {
1381         BUG_ON(preemptible());
1382         /* Restore FPEXC */
1383         fmxr(FPEXC, fp_orig_val);
1384         isb();
1385         /* Restore CPACR */
1386         set_copro_access(venum_orig_val);
1387 }
1388
1389 static u32 krait_get_pmresrn_event(unsigned int region)
1390 {
1391         static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
1392                                              KRAIT_PMRESR1_GROUP0,
1393                                              KRAIT_PMRESR2_GROUP0 };
1394         return pmresrn_table[region];
1395 }
1396
1397 static void krait_evt_setup(int idx, u32 config_base)
1398 {
1399         u32 val;
1400         u32 mask;
1401         u32 vval, fval;
1402         unsigned int region = EVENT_REGION(config_base);
1403         unsigned int group = EVENT_GROUP(config_base);
1404         unsigned int code = EVENT_CODE(config_base);
1405         unsigned int group_shift;
1406         bool venum_event = EVENT_VENUM(config_base);
1407
1408         group_shift = group * 8;
1409         mask = 0xff << group_shift;
1410
1411         /* Configure evtsel for the region and group */
1412         if (venum_event)
1413                 val = KRAIT_VPMRESR0_GROUP0;
1414         else
1415                 val = krait_get_pmresrn_event(region);
1416         val += group;
1417         /* Mix in mode-exclusion bits */
1418         val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1419         armv7_pmnc_write_evtsel(idx, val);
1420
1421         if (venum_event) {
1422                 venum_pre_pmresr(&vval, &fval);
1423                 val = venum_read_pmresr();
1424                 val &= ~mask;
1425                 val |= code << group_shift;
1426                 val |= PMRESRn_EN;
1427                 venum_write_pmresr(val);
1428                 venum_post_pmresr(vval, fval);
1429         } else {
1430                 val = krait_read_pmresrn(region);
1431                 val &= ~mask;
1432                 val |= code << group_shift;
1433                 val |= PMRESRn_EN;
1434                 krait_write_pmresrn(region, val);
1435         }
1436 }
1437
1438 static u32 clear_pmresrn_group(u32 val, int group)
1439 {
1440         u32 mask;
1441         int group_shift;
1442
1443         group_shift = group * 8;
1444         mask = 0xff << group_shift;
1445         val &= ~mask;
1446
1447         /* Keep the enable bit if any group in the region is still in use */
1448         if (val & ~PMRESRn_EN)
1449                 return val | PMRESRn_EN;
1450
1451         return 0;
1452 }
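/*
 * Editorial walk-through (not part of the original source), assuming
 * group 0 is being cleared:
 *
 *   val = 0x800000ff  ->  only group 0 was in use; the function returns
 *                         0 and the whole region is disabled.
 *   val = 0x8000ff00  ->  group 1 is still in use, so EN is kept and
 *                         0x8000ff00 is returned.
 */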
1453
1454 static void krait_clearpmu(u32 config_base)
1455 {
1456         u32 val;
1457         u32 vval, fval;
1458         unsigned int region = EVENT_REGION(config_base);
1459         unsigned int group = EVENT_GROUP(config_base);
1460         bool venum_event = EVENT_VENUM(config_base);
1461
1462         if (venum_event) {
1463                 venum_pre_pmresr(&vval, &fval);
1464                 val = venum_read_pmresr();
1465                 val = clear_pmresrn_group(val, group);
1466                 venum_write_pmresr(val);
1467                 venum_post_pmresr(vval, fval);
1468         } else {
1469                 val = krait_read_pmresrn(region);
1470                 val = clear_pmresrn_group(val, group);
1471                 krait_write_pmresrn(region, val);
1472         }
1473 }
1474
1475 static void krait_pmu_disable_event(struct perf_event *event)
1476 {
1477         struct hw_perf_event *hwc = &event->hw;
1478         int idx = hwc->idx;
1479
1480         /* Disable counter and interrupt */
1481
1482         /* Disable counter */
1483         armv7_pmnc_disable_counter(idx);
1484
1485         /*
1486          * Clear pmresr code (if destined for PMNx counters)
1487          */
1488         if (hwc->config_base & KRAIT_EVENT_MASK)
1489                 krait_clearpmu(hwc->config_base);
1490
1491         /* Disable interrupt for this counter */
1492         armv7_pmnc_disable_intens(idx);
1493 }
1494
1495 static void krait_pmu_enable_event(struct perf_event *event)
1496 {
1497         struct hw_perf_event *hwc = &event->hw;
1498         int idx = hwc->idx;
1499
1500         /*
1501          * Enable counter and interrupt, and set the counter to count
1502          * the event that we're interested in.
1503          */
1504
1505         /* Disable counter */
1506         armv7_pmnc_disable_counter(idx);
1507
1508         /*
1509          * Set event (if destined for PMNx counters)
1510          * We set the event for the cycle counter because we
1511          * have the ability to perform event filtering.
1512          */
1513         if (hwc->config_base & KRAIT_EVENT_MASK)
1514                 krait_evt_setup(idx, hwc->config_base);
1515         else
1516                 armv7_pmnc_write_evtsel(idx, hwc->config_base);
1517
1518         /* Enable interrupt for this counter */
1519         armv7_pmnc_enable_intens(idx);
1520
1521         /* Enable counter */
1522         armv7_pmnc_enable_counter(idx);
1523 }
1524
1525 static void krait_pmu_reset(void *info)
1526 {
1527         u32 vval, fval;
1528         struct arm_pmu *cpu_pmu = info;
1529         u32 idx, nb_cnt = cpu_pmu->num_events;
1530
1531         armv7pmu_reset(info);
1532
1533         /* Clear all pmresrs */
1534         krait_write_pmresrn(0, 0);
1535         krait_write_pmresrn(1, 0);
1536         krait_write_pmresrn(2, 0);
1537
1538         venum_pre_pmresr(&vval, &fval);
1539         venum_write_pmresr(0);
1540         venum_post_pmresr(vval, fval);
1541
1542         /* Reset PMxEVNCTCR to sane default */
1543         for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1544                 armv7_pmnc_select_counter(idx);
1545                 asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1546         }
1547
1548 }
1549
1550 static int krait_event_to_bit(struct perf_event *event, unsigned int region,
1551                               unsigned int group)
1552 {
1553         int bit;
1554         struct hw_perf_event *hwc = &event->hw;
1555         struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1556
1557         if (hwc->config_base & VENUM_EVENT)
1558                 bit = KRAIT_VPMRESR0_GROUP0;
1559         else
1560                 bit = krait_get_pmresrn_event(region);
1561         bit -= krait_get_pmresrn_event(0);
1562         bit += group;
1563         /*
1564          * Lower bits are reserved for use by the counters (see
1565          * armv7pmu_get_event_idx() for more info)
1566          */
1567         bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1568
1569         return bit;
1570 }
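/*
 * Editorial example (not part of the original source), assuming the
 * KRAIT_PMRESR{0,1,2}_GROUP0 and KRAIT_VPMRESR0_GROUP0 constants defined
 * earlier in this file are spaced four apart (one slot per group), as in
 * mainline: a Krait CPU event in region 2, group 1 on a PMU with a cycle
 * counter plus four event counters (ARMV7_IDX_COUNTER_LAST == 4) maps to
 * bit 2*4 + 1 + 5 = 14 of cpuc->used_mask.
 */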
1571
1572 /*
1573  * We check for column exclusion constraints here.
1574  * Two events can't use the same group within a pmresr register.
1575  */
1576 static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1577                                    struct perf_event *event)
1578 {
1579         int idx;
1580         int bit = -1;
1581         struct hw_perf_event *hwc = &event->hw;
1582         unsigned int region = EVENT_REGION(hwc->config_base);
1583         unsigned int code = EVENT_CODE(hwc->config_base);
1584         unsigned int group = EVENT_GROUP(hwc->config_base);
1585         bool venum_event = EVENT_VENUM(hwc->config_base);
1586         bool krait_event = EVENT_CPU(hwc->config_base);
1587
1588         if (venum_event || krait_event) {
1589                 /* Reject invalid events */
1590                 if (group > 3 || region > 2)
1591                         return -EINVAL;
1592                 if (venum_event && (code & 0xe0))
1593                         return -EINVAL;
1594
1595                 bit = krait_event_to_bit(event, region, group);
1596                 if (test_and_set_bit(bit, cpuc->used_mask))
1597                         return -EAGAIN;
1598         }
1599
1600         idx = armv7pmu_get_event_idx(cpuc, event);
1601         if (idx < 0 && bit >= 0)
1602                 clear_bit(bit, cpuc->used_mask);
1603
1604         return idx;
1605 }
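/*
 * Editorial note (not part of the original source): with the 0xNRCCG
 * encoding, 0x12021 and 0x12031 differ only in the event code (CC), so
 * both land in the PMRESR2 group-1 slot. krait_event_to_bit() maps them
 * to the same bit, and the second event to be scheduled fails the
 * test_and_set_bit() above and returns -EAGAIN.
 */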
1606
1607 static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1608                                       struct perf_event *event)
1609 {
1610         int bit;
1611         struct hw_perf_event *hwc = &event->hw;
1612         unsigned int region = EVENT_REGION(hwc->config_base);
1613         unsigned int group = EVENT_GROUP(hwc->config_base);
1614         bool venum_event = EVENT_VENUM(hwc->config_base);
1615         bool krait_event = EVENT_CPU(hwc->config_base);
1616
1617         armv7pmu_clear_event_idx(cpuc, event);
1618         if (venum_event || krait_event) {
1619                 bit = krait_event_to_bit(event, region, group);
1620                 clear_bit(bit, cpuc->used_mask);
1621         }
1622 }
1623
1624 static int krait_pmu_init(struct arm_pmu *cpu_pmu)
1625 {
1626         armv7pmu_init(cpu_pmu);
1627         cpu_pmu->name           = "armv7_krait";
1628         /* Some early versions of Krait don't support PC write events */
1629         if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
1630                                   "qcom,no-pc-write"))
1631                 cpu_pmu->map_event = krait_map_event_no_branch;
1632         else
1633                 cpu_pmu->map_event = krait_map_event;
1634         cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1635         cpu_pmu->reset          = krait_pmu_reset;
1636         cpu_pmu->enable         = krait_pmu_enable_event;
1637         cpu_pmu->disable        = krait_pmu_disable_event;
1638         cpu_pmu->get_event_idx  = krait_pmu_get_event_idx;
1639         cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
1640         return armv7_probe_num_events(cpu_pmu);
1641 }
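/*
 * Editorial sketch (not part of the original source): a device tree for
 * an early Krait might describe its PMU roughly as below. The interrupt
 * specifier is a placeholder, but the compatible string and the
 * "qcom,no-pc-write" property are the ones matched in this file, and the
 * property's presence selects krait_map_event_no_branch above.
 *
 *	pmu {
 *		compatible = "qcom,krait-pmu";
 *		interrupts = <1 7 0xf04>;	// placeholder
 *		qcom,no-pc-write;
 *	};
 */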
1642
1643 /*
1644  * Scorpion Local Performance Monitor Register (LPMn)
1645  *
1646  *            31   30     24     16     8      0
1647  *            +--------------------------------+
1648  *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
1649  *            +--------------------------------+
1650  *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
1651  *            +--------------------------------+
1652  *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
1653  *            +--------------------------------+
1654  *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 3
1655  *            +--------------------------------+
1656  *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
1657  *            +--------------------------------+
1658  *              EN | G=3  | G=2  | G=1  | G=0
1659  *
1660  *
1661  *  Event Encoding:
1662  *
1663  *      hwc->config_base = 0xNRCCG
1664  *
1665  *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
1666  *      R  = region register
1667  *      CC = class of events the group G is choosing from
1668  *      G  = group or particular event
1669  *
1670  *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
1671  *
1672  *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
1673  *  unit, etc.) while the event code (CC) corresponds to a particular class of
1674  *  events (interrupts for example). An event code is broken down into
1675  *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1676  *  example).
1677  */
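/*
 * Editorial note (not part of the original source): decoding works as in
 * the Krait case, with one extra region. For instance, 0x13021 is a
 * Scorpion CPU event (N=1) in region 3, i.e. the L2LPM register, group 1,
 * code 0x02, while 0x20041 (N=2) selects a Venum VFP event in VLPM with
 * code 0x04 in group 1.
 */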
1678
1679 static u32 scorpion_read_pmresrn(int n)
1680 {
1681         u32 val;
1682
1683         switch (n) {
1684         case 0:
1685                 asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
1686                 break;
1687         case 1:
1688                 asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
1689                 break;
1690         case 2:
1691                 asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
1692                 break;
1693         case 3:
1694                 asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
1695                 break;
1696         default:
1697                 BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1698         }
1699
1700         return val;
1701 }
1702
1703 static void scorpion_write_pmresrn(int n, u32 val)
1704 {
1705         switch (n) {
1706         case 0:
1707                 asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
1708                 break;
1709         case 1:
1710                 asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
1711                 break;
1712         case 2:
1713                 asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
1714                 break;
1715         case 3:
1716                 asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
1717                 break;
1718         default:
1719                 BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1720         }
1721 }
1722
1723 static u32 scorpion_get_pmresrn_event(unsigned int region)
1724 {
1725         static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
1726                                              SCORPION_LPM1_GROUP0,
1727                                              SCORPION_LPM2_GROUP0,
1728                                              SCORPION_L2LPM_GROUP0 };
1729         return pmresrn_table[region];
1730 }
1731
1732 static void scorpion_evt_setup(int idx, u32 config_base)
1733 {
1734         u32 val;
1735         u32 mask;
1736         u32 vval, fval;
1737         unsigned int region = EVENT_REGION(config_base);
1738         unsigned int group = EVENT_GROUP(config_base);
1739         unsigned int code = EVENT_CODE(config_base);
1740         unsigned int group_shift;
1741         bool venum_event = EVENT_VENUM(config_base);
1742
1743         group_shift = group * 8;
1744         mask = 0xff << group_shift;
1745
1746         /* Configure evtsel for the region and group */
1747         if (venum_event)
1748                 val = SCORPION_VLPM_GROUP0;
1749         else
1750                 val = scorpion_get_pmresrn_event(region);
1751         val += group;
1752         /* Mix in mode-exclusion bits */
1753         val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1754         armv7_pmnc_write_evtsel(idx, val);
1755
1756         asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1757
1758         if (venum_event) {
1759                 venum_pre_pmresr(&vval, &fval);
1760                 val = venum_read_pmresr();
1761                 val &= ~mask;
1762                 val |= code << group_shift;
1763                 val |= PMRESRn_EN;
1764                 venum_write_pmresr(val);
1765                 venum_post_pmresr(vval, fval);
1766         } else {
1767                 val = scorpion_read_pmresrn(region);
1768                 val &= ~mask;
1769                 val |= code << group_shift;
1770                 val |= PMRESRn_EN;
1771                 scorpion_write_pmresrn(region, val);
1772         }
1773 }
1774
1775 static void scorpion_clearpmu(u32 config_base)
1776 {
1777         u32 val;
1778         u32 vval, fval;
1779         unsigned int region = EVENT_REGION(config_base);
1780         unsigned int group = EVENT_GROUP(config_base);
1781         bool venum_event = EVENT_VENUM(config_base);
1782
1783         if (venum_event) {
1784                 venum_pre_pmresr(&vval, &fval);
1785                 val = venum_read_pmresr();
1786                 val = clear_pmresrn_group(val, group);
1787                 venum_write_pmresr(val);
1788                 venum_post_pmresr(vval, fval);
1789         } else {
1790                 val = scorpion_read_pmresrn(region);
1791                 val = clear_pmresrn_group(val, group);
1792                 scorpion_write_pmresrn(region, val);
1793         }
1794 }
1795
1796 static void scorpion_pmu_disable_event(struct perf_event *event)
1797 {
1798         struct hw_perf_event *hwc = &event->hw;
1799         int idx = hwc->idx;
1800
1801         /* Disable counter and interrupt */
1802
1803         /* Disable counter */
1804         armv7_pmnc_disable_counter(idx);
1805
1806         /*
1807          * Clear pmresr code (if destined for PMNx counters)
1808          */
1809         if (hwc->config_base & KRAIT_EVENT_MASK)
1810                 scorpion_clearpmu(hwc->config_base);
1811
1812         /* Disable interrupt for this counter */
1813         armv7_pmnc_disable_intens(idx);
1814 }
1815
1816 static void scorpion_pmu_enable_event(struct perf_event *event)
1817 {
1818         struct hw_perf_event *hwc = &event->hw;
1819         int idx = hwc->idx;
1820
1821         /*
1822          * Enable counter and interrupt, and set the counter to count
1823          * the event that we're interested in.
1824          */
1825
1826         /* Disable counter */
1827         armv7_pmnc_disable_counter(idx);
1828
1829         /*
1830          * Set event (if destined for PMNx counters)
1831          * We don't set the event for the cycle counter because we
1832          * don't have the ability to perform event filtering.
1833          */
1834         if (hwc->config_base & KRAIT_EVENT_MASK)
1835                 scorpion_evt_setup(idx, hwc->config_base);
1836         else if (idx != ARMV7_IDX_CYCLE_COUNTER)
1837                 armv7_pmnc_write_evtsel(idx, hwc->config_base);
1838
1839         /* Enable interrupt for this counter */
1840         armv7_pmnc_enable_intens(idx);
1841
1842         /* Enable counter */
1843         armv7_pmnc_enable_counter(idx);
1844 }
1845
1846 static void scorpion_pmu_reset(void *info)
1847 {
1848         u32 vval, fval;
1849         struct arm_pmu *cpu_pmu = info;
1850         u32 idx, nb_cnt = cpu_pmu->num_events;
1851
1852         armv7pmu_reset(info);
1853
1854         /* Clear all pmresrs */
1855         scorpion_write_pmresrn(0, 0);
1856         scorpion_write_pmresrn(1, 0);
1857         scorpion_write_pmresrn(2, 0);
1858         scorpion_write_pmresrn(3, 0);
1859
1860         venum_pre_pmresr(&vval, &fval);
1861         venum_write_pmresr(0);
1862         venum_post_pmresr(vval, fval);
1863
1864         /* Reset PMxEVNCTCR to sane default */
1865         for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1866                 armv7_pmnc_select_counter(idx);
1867                 asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1868         }
1869 }
1870
1871 static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
1872                               unsigned int group)
1873 {
1874         int bit;
1875         struct hw_perf_event *hwc = &event->hw;
1876         struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1877
1878         if (hwc->config_base & VENUM_EVENT)
1879                 bit = SCORPION_VLPM_GROUP0;
1880         else
1881                 bit = scorpion_get_pmresrn_event(region);
1882         bit -= scorpion_get_pmresrn_event(0);
1883         bit += group;
1884         /*
1885          * Lower bits are reserved for use by the counters (see
1886          * armv7pmu_get_event_idx() for more info)
1887          */
1888         bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1889
1890         return bit;
1891 }
1892
1893 /*
1894  * We check for column exclusion constraints here.
1895  * Two events can't use the same group within a pmresr register.
1896  */
1897 static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1898                                    struct perf_event *event)
1899 {
1900         int idx;
1901         int bit = -1;
1902         struct hw_perf_event *hwc = &event->hw;
1903         unsigned int region = EVENT_REGION(hwc->config_base);
1904         unsigned int group = EVENT_GROUP(hwc->config_base);
1905         bool venum_event = EVENT_VENUM(hwc->config_base);
1906         bool scorpion_event = EVENT_CPU(hwc->config_base);
1907
1908         if (venum_event || scorpion_event) {
1909                 /* Reject invalid events */
1910                 if (group > 3 || region > 3)
1911                         return -EINVAL;
1912
1913                 bit = scorpion_event_to_bit(event, region, group);
1914                 if (test_and_set_bit(bit, cpuc->used_mask))
1915                         return -EAGAIN;
1916         }
1917
1918         idx = armv7pmu_get_event_idx(cpuc, event);
1919         if (idx < 0 && bit >= 0)
1920                 clear_bit(bit, cpuc->used_mask);
1921
1922         return idx;
1923 }
1924
1925 static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1926                                       struct perf_event *event)
1927 {
1928         int bit;
1929         struct hw_perf_event *hwc = &event->hw;
1930         unsigned int region = EVENT_REGION(hwc->config_base);
1931         unsigned int group = EVENT_GROUP(hwc->config_base);
1932         bool venum_event = EVENT_VENUM(hwc->config_base);
1933         bool scorpion_event = EVENT_CPU(hwc->config_base);
1934
1935         armv7pmu_clear_event_idx(cpuc, event);
1936         if (venum_event || scorpion_event) {
1937                 bit = scorpion_event_to_bit(event, region, group);
1938                 clear_bit(bit, cpuc->used_mask);
1939         }
1940 }
1941
1942 static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
1943 {
1944         armv7pmu_init(cpu_pmu);
1945         cpu_pmu->name           = "armv7_scorpion";
1946         cpu_pmu->map_event      = scorpion_map_event;
1947         cpu_pmu->reset          = scorpion_pmu_reset;
1948         cpu_pmu->enable         = scorpion_pmu_enable_event;
1949         cpu_pmu->disable        = scorpion_pmu_disable_event;
1950         cpu_pmu->get_event_idx  = scorpion_pmu_get_event_idx;
1951         cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
1952         return armv7_probe_num_events(cpu_pmu);
1953 }
1954
1955 static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
1956 {
1957         armv7pmu_init(cpu_pmu);
1958         cpu_pmu->name           = "armv7_scorpion_mp";
1959         cpu_pmu->map_event      = scorpion_map_event;
1960         cpu_pmu->reset          = scorpion_pmu_reset;
1961         cpu_pmu->enable         = scorpion_pmu_enable_event;
1962         cpu_pmu->disable        = scorpion_pmu_disable_event;
1963         cpu_pmu->get_event_idx  = scorpion_pmu_get_event_idx;
1964         cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
1965         return armv7_probe_num_events(cpu_pmu);
1966 }
1967
1968 static const struct of_device_id armv7_pmu_of_device_ids[] = {
1969         {.compatible = "arm,cortex-a17-pmu",    .data = armv7_a17_pmu_init},
1970         {.compatible = "arm,cortex-a15-pmu",    .data = armv7_a15_pmu_init},
1971         {.compatible = "arm,cortex-a12-pmu",    .data = armv7_a12_pmu_init},
1972         {.compatible = "arm,cortex-a9-pmu",     .data = armv7_a9_pmu_init},
1973         {.compatible = "arm,cortex-a8-pmu",     .data = armv7_a8_pmu_init},
1974         {.compatible = "arm,cortex-a7-pmu",     .data = armv7_a7_pmu_init},
1975         {.compatible = "arm,cortex-a5-pmu",     .data = armv7_a5_pmu_init},
1976         {.compatible = "qcom,krait-pmu",        .data = krait_pmu_init},
1977         {.compatible = "qcom,scorpion-pmu",     .data = scorpion_pmu_init},
1978         {.compatible = "qcom,scorpion-mp-pmu",  .data = scorpion_mp_pmu_init},
1979         {},
1980 };
1981
1982 static const struct pmu_probe_info armv7_pmu_probe_table[] = {
1983         ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
1984         ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
1985         { /* sentinel value */ }
1986 };
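/*
 * Editorial note (not part of the original source): when the PMU is
 * described in the device tree, arm_pmu_device_probe() matches the node's
 * compatible string against armv7_pmu_of_device_ids and calls the init
 * function stored in .data; without a device-tree match, it falls back to
 * armv7_pmu_probe_table, probing the CPU main ID register for Cortex-A8
 * or Cortex-A9 parts.
 */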
1987
1988
1989 static int armv7_pmu_device_probe(struct platform_device *pdev)
1990 {
1991         return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
1992                                     armv7_pmu_probe_table);
1993 }
1994
1995 static struct platform_driver armv7_pmu_driver = {
1996         .driver         = {
1997                 .name   = "armv7-pmu",
1998                 .of_match_table = armv7_pmu_of_device_ids,
1999                 .suppress_bind_attrs = true,
2000         },
2001         .probe          = armv7_pmu_device_probe,
2002 };
2003
2004 builtin_platform_driver(armv7_pmu_driver);
2005 #endif  /* CONFIG_CPU_V7 */