1 | // SPDX-License-Identifier: GPL-2.0 |
2 | // CCI Cache Coherent Interconnect PMU driver | |
3 | // Copyright (C) 2013-2018 Arm Ltd. | |
4 | // Author: Punit Agrawal <[email protected]>, Suzuki Poulose <[email protected]> | |
5 | ||
6 | #include <linux/arm-cci.h> | |
7 | #include <linux/io.h> | |
8 | #include <linux/interrupt.h> | |
9 | #include <linux/module.h> | |
10 | #include <linux/of.h>
11 | #include <linux/perf_event.h> |
12 | #include <linux/platform_device.h> | |
13 | #include <linux/slab.h> | |
14 | #include <linux/spinlock.h> | |
15 | ||
16 | #define DRIVER_NAME "ARM-CCI PMU" |
17 | ||
18 | #define CCI_PMCR 0x0100 | |
19 | #define CCI_PID2 0x0fe8 | |
20 | ||
21 | #define CCI_PMCR_CEN 0x00000001 | |
22 | #define CCI_PMCR_NCNT_MASK 0x0000f800 | |
23 | #define CCI_PMCR_NCNT_SHIFT 11 | |
24 | ||
25 | #define CCI_PID2_REV_MASK 0xf0 | |
26 | #define CCI_PID2_REV_SHIFT 4 | |
27 | ||
28 | #define CCI_PMU_EVT_SEL 0x000 | |
29 | #define CCI_PMU_CNTR 0x004 | |
30 | #define CCI_PMU_CNTR_CTRL 0x008 | |
31 | #define CCI_PMU_OVRFLW 0x00c | |
32 | ||
33 | #define CCI_PMU_OVRFLW_FLAG 1 | |
34 | ||
35 | #define CCI_PMU_CNTR_SIZE(model) ((model)->cntr_size) | |
36 | #define CCI_PMU_CNTR_BASE(model, idx) ((idx) * CCI_PMU_CNTR_SIZE(model)) | |
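/*
 * For example, with the CCI-400 model below (cntr_size = SZ_4K), counter
 * index 2 is mapped at offset 2 * SZ_4K = 0x2000 from cci_pmu->base.
 */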
37 | #define CCI_PMU_CNTR_MASK ((1ULL << 32) - 1)
38 | #define CCI_PMU_CNTR_LAST(cci_pmu) (cci_pmu->num_cntrs - 1) |
39 | ||
40 | #define CCI_PMU_MAX_HW_CNTRS(model) \ | |
41 | ((model)->num_hw_cntrs + (model)->fixed_hw_cntrs) | |
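/*
 * For example, a CCI-400 provides NUM_HW_CNTRS_CII_4XX (4) programmable
 * counters plus FIXED_HW_CNTRS_CII_4XX (1) fixed cycle counter, i.e. 5 in total.
 */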
42 | ||
43 | /* Types of interfaces that can generate events */ | |
44 | enum { | |
45 | CCI_IF_SLAVE, | |
46 | CCI_IF_MASTER, | |
47 | #ifdef CONFIG_ARM_CCI5xx_PMU | |
48 | CCI_IF_GLOBAL, | |
49 | #endif | |
50 | CCI_IF_MAX, | |
51 | }; | |
52 | ||
53 | #define NUM_HW_CNTRS_CII_4XX 4 |
54 | #define NUM_HW_CNTRS_CII_5XX 8 | |
55 | #define NUM_HW_CNTRS_MAX NUM_HW_CNTRS_CII_5XX | |
56 | ||
57 | #define FIXED_HW_CNTRS_CII_4XX 1 | |
58 | #define FIXED_HW_CNTRS_CII_5XX 0 | |
59 | #define FIXED_HW_CNTRS_MAX FIXED_HW_CNTRS_CII_4XX | |
60 | ||
61 | #define HW_CNTRS_MAX (NUM_HW_CNTRS_MAX + FIXED_HW_CNTRS_MAX) | |
62 | ||
63 | struct event_range { |
64 | u32 min; | |
65 | u32 max; | |
66 | }; | |
67 | ||
68 | struct cci_pmu_hw_events { | |
69 | struct perf_event **events; | |
70 | unsigned long *used_mask; | |
71 | raw_spinlock_t pmu_lock; | |
72 | }; | |
73 | ||
74 | struct cci_pmu; | |
75 | /* | |
76 | * struct cci_pmu_model: | |
77 | * @fixed_hw_cntrs - Number of fixed event counters | |
78 | * @num_hw_cntrs - Maximum number of programmable event counters | |
79 | * @cntr_size - Size of an event counter mapping | |
80 | */ | |
81 | struct cci_pmu_model { | |
82 | char *name; | |
83 | u32 fixed_hw_cntrs; | |
84 | u32 num_hw_cntrs; | |
85 | u32 cntr_size; | |
86 | struct attribute **format_attrs; | |
87 | struct attribute **event_attrs; | |
88 | struct event_range event_ranges[CCI_IF_MAX]; | |
89 | int (*validate_hw_event)(struct cci_pmu *, unsigned long); | |
90 | int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long); | |
91 | void (*write_counters)(struct cci_pmu *, unsigned long *); | |
92 | }; | |
93 | ||
94 | static struct cci_pmu_model cci_pmu_models[]; | |
95 | ||
96 | struct cci_pmu { | |
97 | void __iomem *base; | |
98 | void __iomem *ctrl_base;
99 | struct pmu pmu;
100 | int cpu;
101 | int nr_irqs; |
102 | int *irqs; | |
103 | unsigned long active_irqs; | |
104 | const struct cci_pmu_model *model; | |
105 | struct cci_pmu_hw_events hw_events; | |
106 | struct platform_device *plat_device; | |
107 | int num_cntrs; | |
108 | atomic_t active_events; | |
109 | struct mutex reserve_mutex; | |
110 | }; |
111 | ||
112 | #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) | |
113 | ||
114 | static struct cci_pmu *g_cci_pmu; |
115 | ||
116 | enum cci_models { |
117 | #ifdef CONFIG_ARM_CCI400_PMU | |
118 | CCI400_R0, | |
119 | CCI400_R1, | |
120 | #endif | |
121 | #ifdef CONFIG_ARM_CCI5xx_PMU | |
122 | CCI500_R0, | |
123 | CCI550_R0, | |
124 | #endif | |
125 | CCI_MODEL_MAX | |
126 | }; | |
127 | ||
128 | static void pmu_write_counters(struct cci_pmu *cci_pmu, | |
129 | unsigned long *mask); | |
130 | static ssize_t __maybe_unused cci_pmu_event_show(struct device *dev,
131 | struct device_attribute *attr, char *buf); |
132 | ||
133 | #define CCI_EXT_ATTR_ENTRY(_name, _func, _config) \ | |
134 | &((struct dev_ext_attribute[]) { \ | |
135 | { __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config } \ | |
136 | })[0].attr.attr | |
137 | ||
138 | #define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \ | |
139 | CCI_EXT_ATTR_ENTRY(_name, device_show_string, _config)
140 | #define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \ |
141 | CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config) | |
142 | ||
143 | /* CCI400 PMU Specific definitions */ | |
144 | ||
145 | #ifdef CONFIG_ARM_CCI400_PMU | |
146 | ||
147 | /* Port ids */ | |
148 | #define CCI400_PORT_S0 0 | |
149 | #define CCI400_PORT_S1 1 | |
150 | #define CCI400_PORT_S2 2 | |
151 | #define CCI400_PORT_S3 3 | |
152 | #define CCI400_PORT_S4 4 | |
153 | #define CCI400_PORT_M0 5 | |
154 | #define CCI400_PORT_M1 6 | |
155 | #define CCI400_PORT_M2 7 | |
156 | ||
157 | #define CCI400_R1_PX 5 | |
158 | ||
159 | /* | |
160 | * Instead of an event id to monitor CCI cycles, a dedicated counter is | |
161 | * provided. Use 0xff to represent CCI cycles and hope that no future revisions | |
162 | * make use of this event in hardware. | |
163 | */ | |
164 | enum cci400_perf_events { | |
165 | CCI400_PMU_CYCLES = 0xff | |
166 | }; | |
167 | ||
168 | #define CCI400_PMU_CYCLE_CNTR_IDX 0 | |
169 | #define CCI400_PMU_CNTR0_IDX 1 | |
170 | ||
171 | /* | |
172 | * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8 | |
173 | * ports and bits 4:0 are event codes. There are different event codes | |
174 | * associated with each port type. | |
175 | * | |
176 | * Additionally, the range of events associated with the port types changed | |
177 | * between Rev0 and Rev1. | |
178 | * | |
179 | * The constants below define the range of valid codes for each port type for | |
180 | * the different revisions and are used to validate the event to be monitored. | |
181 | */ | |
182 | ||
183 | #define CCI400_PMU_EVENT_MASK 0xffUL | |
184 | #define CCI400_PMU_EVENT_SOURCE_SHIFT 5 | |
185 | #define CCI400_PMU_EVENT_SOURCE_MASK 0x7 | |
186 | #define CCI400_PMU_EVENT_CODE_SHIFT 0 | |
187 | #define CCI400_PMU_EVENT_CODE_MASK 0x1f | |
188 | #define CCI400_PMU_EVENT_SOURCE(event) \ | |
189 | ((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \ | |
190 | CCI400_PMU_EVENT_SOURCE_MASK) | |
191 | #define CCI400_PMU_EVENT_CODE(event) \ | |
192 | ((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK) | |
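/*
 * Worked example: config 0x6a decodes to source 0x3 (slave port S3) and
 * event code 0x0a (si_r_data_last_hs_snoop in the event tables below).
 */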
193 | ||
194 | #define CCI400_R0_SLAVE_PORT_MIN_EV 0x00 | |
195 | #define CCI400_R0_SLAVE_PORT_MAX_EV 0x13 | |
196 | #define CCI400_R0_MASTER_PORT_MIN_EV 0x14 | |
197 | #define CCI400_R0_MASTER_PORT_MAX_EV 0x1a | |
198 | ||
199 | #define CCI400_R1_SLAVE_PORT_MIN_EV 0x00 | |
200 | #define CCI400_R1_SLAVE_PORT_MAX_EV 0x14 | |
201 | #define CCI400_R1_MASTER_PORT_MIN_EV 0x00 | |
202 | #define CCI400_R1_MASTER_PORT_MAX_EV 0x11 | |
203 | ||
204 | #define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \ | |
205 | CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \ | |
206 | (unsigned long)_config) | |
207 | ||
208 | static ssize_t cci400_pmu_cycle_event_show(struct device *dev, | |
209 | struct device_attribute *attr, char *buf); | |
210 | ||
211 | static struct attribute *cci400_pmu_format_attrs[] = { | |
212 | CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"), | |
213 | CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"), | |
214 | NULL | |
215 | }; | |
216 | ||
217 | static struct attribute *cci400_r0_pmu_event_attrs[] = { | |
218 | /* Slave events */ | |
219 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0), | |
220 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01), | |
221 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2), | |
222 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3), | |
223 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4), | |
224 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5), | |
225 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6), | |
226 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), | |
227 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8), | |
228 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9), | |
229 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA), | |
230 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB), | |
231 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC), | |
232 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD), | |
233 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE), | |
234 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF), | |
235 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10), | |
236 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11), | |
237 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12), | |
238 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13), | |
239 | /* Master events */ | |
240 | CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14), | |
241 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15), | |
242 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16), | |
243 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17), | |
244 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18), | |
245 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19), | |
246 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A), | |
247 | /* Special event for cycles counter */ | |
248 | CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff), | |
249 | NULL | |
250 | }; | |
251 | ||
252 | static struct attribute *cci400_r1_pmu_event_attrs[] = { | |
253 | /* Slave events */ | |
254 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0), | |
255 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01), | |
256 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2), | |
257 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3), | |
258 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4), | |
259 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5), | |
260 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6), | |
261 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), | |
262 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8), | |
263 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9), | |
264 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA), | |
265 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB), | |
266 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC), | |
267 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD), | |
268 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE), | |
269 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF), | |
270 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10), | |
271 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11), | |
272 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12), | |
273 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13), | |
274 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14), | |
275 | /* Master events */ | |
276 | CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0), | |
277 | CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1), | |
278 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2), | |
279 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3), | |
280 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4), | |
281 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5), | |
282 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6), | |
283 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7), | |
284 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8), | |
285 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9), | |
286 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA), | |
287 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB), | |
288 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC), | |
289 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD), | |
290 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE), | |
291 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF), | |
292 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10), | |
293 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11), | |
294 | /* Special event for cycles counter */ | |
295 | CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff), | |
296 | NULL | |
297 | }; | |
298 | ||
299 | static ssize_t cci400_pmu_cycle_event_show(struct device *dev, | |
300 | struct device_attribute *attr, char *buf) | |
301 | { | |
302 | struct dev_ext_attribute *eattr = container_of(attr, | |
303 | struct dev_ext_attribute, attr); | |
304 | return sysfs_emit(buf, "config=0x%lx\n", (unsigned long)eattr->var);
305 | } |
306 | ||
307 | static int cci400_get_event_idx(struct cci_pmu *cci_pmu, | |
308 | struct cci_pmu_hw_events *hw, | |
309 | unsigned long cci_event) | |
310 | { | |
311 | int idx; | |
312 | ||
313 | /* cycles event idx is fixed */ | |
314 | if (cci_event == CCI400_PMU_CYCLES) { | |
315 | if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask)) | |
316 | return -EAGAIN; | |
317 | ||
318 | return CCI400_PMU_CYCLE_CNTR_IDX; | |
319 | } | |
320 | ||
321 | for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx) | |
322 | if (!test_and_set_bit(idx, hw->used_mask)) | |
323 | return idx; | |
324 | ||
325 | /* No counters available */ | |
326 | return -EAGAIN; | |
327 | } | |
328 | ||
329 | static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event) | |
330 | { | |
331 | u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event); | |
332 | u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event); | |
333 | int if_type; | |
334 | ||
335 | if (hw_event & ~CCI400_PMU_EVENT_MASK) | |
336 | return -ENOENT; | |
337 | ||
338 | if (hw_event == CCI400_PMU_CYCLES) | |
339 | return hw_event; | |
340 | ||
341 | switch (ev_source) { | |
342 | case CCI400_PORT_S0: | |
343 | case CCI400_PORT_S1: | |
344 | case CCI400_PORT_S2: | |
345 | case CCI400_PORT_S3: | |
346 | case CCI400_PORT_S4: | |
347 | /* Slave Interface */ | |
348 | if_type = CCI_IF_SLAVE; | |
349 | break; | |
350 | case CCI400_PORT_M0: | |
351 | case CCI400_PORT_M1: | |
352 | case CCI400_PORT_M2: | |
353 | /* Master Interface */ | |
354 | if_type = CCI_IF_MASTER; | |
355 | break; | |
356 | default: | |
357 | return -ENOENT; | |
358 | } | |
359 | ||
360 | if (ev_code >= cci_pmu->model->event_ranges[if_type].min && | |
361 | ev_code <= cci_pmu->model->event_ranges[if_type].max) | |
362 | return hw_event; | |
363 | ||
364 | return -ENOENT; | |
365 | } | |
366 | ||
367 | static int probe_cci400_revision(struct cci_pmu *cci_pmu)
368 | { |
369 | int rev; | |
370 | rev = readl_relaxed(cci_pmu->ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
371 | rev >>= CCI_PID2_REV_SHIFT; |
372 | ||
373 | if (rev < CCI400_R1_PX) | |
374 | return CCI400_R0; | |
375 | else | |
376 | return CCI400_R1; | |
377 | } | |
378 | ||
379 | static const struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
380 | { |
381 | if (platform_has_secure_cci_access()) | |
382 | return &cci_pmu_models[probe_cci400_revision(cci_pmu)];
383 | return NULL; |
384 | } | |
385 | #else /* !CONFIG_ARM_CCI400_PMU */ | |
386 | static inline struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
387 | { |
388 | return NULL; | |
389 | } | |
390 | #endif /* CONFIG_ARM_CCI400_PMU */ | |
391 | ||
392 | #ifdef CONFIG_ARM_CCI5xx_PMU | |
393 | ||
394 | /* | |
395 | * CCI5xx PMU event id is a 9-bit value made of two parts.
396 | * bits [8:5] - Source for the event | |
397 | * bits [4:0] - Event code (specific to type of interface) | |
398 | * | |
399 | * | |
400 | */ | |
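/*
 * Worked example: config 0x1e2 decodes to source 0xf (the global interface)
 * and event code 0x2 (cci_snoop_access_filter_bank_4_5 in the table below).
 */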
401 | ||
402 | /* Port ids */ | |
403 | #define CCI5xx_PORT_S0 0x0 | |
404 | #define CCI5xx_PORT_S1 0x1 | |
405 | #define CCI5xx_PORT_S2 0x2 | |
406 | #define CCI5xx_PORT_S3 0x3 | |
407 | #define CCI5xx_PORT_S4 0x4 | |
408 | #define CCI5xx_PORT_S5 0x5 | |
409 | #define CCI5xx_PORT_S6 0x6 | |
410 | ||
411 | #define CCI5xx_PORT_M0 0x8 | |
412 | #define CCI5xx_PORT_M1 0x9 | |
413 | #define CCI5xx_PORT_M2 0xa | |
414 | #define CCI5xx_PORT_M3 0xb | |
415 | #define CCI5xx_PORT_M4 0xc | |
416 | #define CCI5xx_PORT_M5 0xd | |
417 | #define CCI5xx_PORT_M6 0xe | |
418 | ||
419 | #define CCI5xx_PORT_GLOBAL 0xf | |
420 | ||
421 | #define CCI5xx_PMU_EVENT_MASK 0x1ffUL | |
422 | #define CCI5xx_PMU_EVENT_SOURCE_SHIFT 0x5 | |
423 | #define CCI5xx_PMU_EVENT_SOURCE_MASK 0xf | |
424 | #define CCI5xx_PMU_EVENT_CODE_SHIFT 0x0 | |
425 | #define CCI5xx_PMU_EVENT_CODE_MASK 0x1f | |
426 | ||
427 | #define CCI5xx_PMU_EVENT_SOURCE(event) \ | |
428 | ((event >> CCI5xx_PMU_EVENT_SOURCE_SHIFT) & CCI5xx_PMU_EVENT_SOURCE_MASK) | |
429 | #define CCI5xx_PMU_EVENT_CODE(event) \ | |
430 | ((event >> CCI5xx_PMU_EVENT_CODE_SHIFT) & CCI5xx_PMU_EVENT_CODE_MASK) | |
431 | ||
432 | #define CCI5xx_SLAVE_PORT_MIN_EV 0x00 | |
433 | #define CCI5xx_SLAVE_PORT_MAX_EV 0x1f | |
434 | #define CCI5xx_MASTER_PORT_MIN_EV 0x00 | |
435 | #define CCI5xx_MASTER_PORT_MAX_EV 0x06 | |
436 | #define CCI5xx_GLOBAL_PORT_MIN_EV 0x00 | |
437 | #define CCI5xx_GLOBAL_PORT_MAX_EV 0x0f | |
438 | ||
439 | ||
440 | #define CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \ | |
441 | CCI_EXT_ATTR_ENTRY(_name, cci5xx_pmu_global_event_show, \ | |
442 | (unsigned long) _config) | |
443 | ||
444 | static ssize_t cci5xx_pmu_global_event_show(struct device *dev, | |
445 | struct device_attribute *attr, char *buf); | |
446 | ||
447 | static struct attribute *cci5xx_pmu_format_attrs[] = { | |
448 | CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"), | |
449 | CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"), | |
450 | NULL, | |
451 | }; | |
452 | ||
453 | static struct attribute *cci5xx_pmu_event_attrs[] = { | |
454 | /* Slave events */ | |
455 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0), | |
456 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1), | |
457 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2), | |
458 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3), | |
459 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4), | |
460 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5), | |
461 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6), | |
462 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7), | |
463 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8), | |
464 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9), | |
465 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA), | |
466 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB), | |
467 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC), | |
468 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD), | |
469 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE), | |
470 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF), | |
471 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10), | |
472 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11), | |
473 | CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 0x12), | |
474 | CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13), | |
475 | CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14), | |
476 | CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15), | |
477 | CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16), | |
478 | CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17), | |
479 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18), | |
480 | CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19), | |
481 | CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A), | |
482 | CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B), | |
483 | CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C), | |
484 | CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D), | |
485 | CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E), | |
486 | CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F), | |
487 | ||
488 | /* Master events */ | |
489 | CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0), | |
490 | CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1), | |
491 | CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2), | |
492 | CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3), | |
493 | CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4), | |
494 | CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5), | |
495 | CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6), | |
496 | ||
497 | /* Global events */ | |
498 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0), | |
499 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1), | |
500 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2), | |
501 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3), | |
502 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4), | |
503 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5), | |
504 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6), | |
505 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7), | |
506 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8), | |
507 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9), | |
508 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA), | |
509 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB), | |
510 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC), | |
511 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD), | |
512 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE), | |
513 | CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF), | |
514 | NULL | |
515 | }; | |
516 | ||
517 | static ssize_t cci5xx_pmu_global_event_show(struct device *dev, | |
518 | struct device_attribute *attr, char *buf) | |
519 | { | |
520 | struct dev_ext_attribute *eattr = container_of(attr, | |
521 | struct dev_ext_attribute, attr); | |
522 | /* Global events have single fixed source code */ | |
523 | return sysfs_emit(buf, "event=0x%lx,source=0x%x\n", |
524 | (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL); | |
525 | } |
526 | ||
527 | /* | |
528 | * CCI500 provides 8 independent event counters that can count | |
529 | * any of the events available. | |
530 | * CCI500 PMU event source ids | |
531 | * 0x0-0x6 - Slave interfaces | |
532 | * 0x8-0xD - Master interfaces | |
533 | * 0xf - Global Events | |
534 | * 0x7,0xe - Reserved | |
535 | */ | |
536 | static int cci500_validate_hw_event(struct cci_pmu *cci_pmu, | |
537 | unsigned long hw_event) | |
538 | { | |
539 | u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event); | |
540 | u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event); | |
541 | int if_type; | |
542 | ||
543 | if (hw_event & ~CCI5xx_PMU_EVENT_MASK) | |
544 | return -ENOENT; | |
545 | ||
546 | switch (ev_source) { | |
547 | case CCI5xx_PORT_S0: | |
548 | case CCI5xx_PORT_S1: | |
549 | case CCI5xx_PORT_S2: | |
550 | case CCI5xx_PORT_S3: | |
551 | case CCI5xx_PORT_S4: | |
552 | case CCI5xx_PORT_S5: | |
553 | case CCI5xx_PORT_S6: | |
554 | if_type = CCI_IF_SLAVE; | |
555 | break; | |
556 | case CCI5xx_PORT_M0: | |
557 | case CCI5xx_PORT_M1: | |
558 | case CCI5xx_PORT_M2: | |
559 | case CCI5xx_PORT_M3: | |
560 | case CCI5xx_PORT_M4: | |
561 | case CCI5xx_PORT_M5: | |
562 | if_type = CCI_IF_MASTER; | |
563 | break; | |
564 | case CCI5xx_PORT_GLOBAL: | |
565 | if_type = CCI_IF_GLOBAL; | |
566 | break; | |
567 | default: | |
568 | return -ENOENT; | |
569 | } | |
570 | ||
571 | if (ev_code >= cci_pmu->model->event_ranges[if_type].min && | |
572 | ev_code <= cci_pmu->model->event_ranges[if_type].max) | |
573 | return hw_event; | |
574 | ||
575 | return -ENOENT; | |
576 | } | |
577 | ||
578 | /* | |
579 | * CCI550 provides 8 independent event counters that can count | |
580 | * any of the events available. | |
581 | * CCI550 PMU event source ids | |
582 | * 0x0-0x6 - Slave interfaces | |
583 | * 0x8-0xe - Master interfaces | |
584 | * 0xf - Global Events | |
585 | * 0x7 - Reserved | |
586 | */ | |
587 | static int cci550_validate_hw_event(struct cci_pmu *cci_pmu, | |
588 | unsigned long hw_event) | |
589 | { | |
590 | u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event); | |
591 | u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event); | |
592 | int if_type; | |
593 | ||
594 | if (hw_event & ~CCI5xx_PMU_EVENT_MASK) | |
595 | return -ENOENT; | |
596 | ||
597 | switch (ev_source) { | |
598 | case CCI5xx_PORT_S0: | |
599 | case CCI5xx_PORT_S1: | |
600 | case CCI5xx_PORT_S2: | |
601 | case CCI5xx_PORT_S3: | |
602 | case CCI5xx_PORT_S4: | |
603 | case CCI5xx_PORT_S5: | |
604 | case CCI5xx_PORT_S6: | |
605 | if_type = CCI_IF_SLAVE; | |
606 | break; | |
607 | case CCI5xx_PORT_M0: | |
608 | case CCI5xx_PORT_M1: | |
609 | case CCI5xx_PORT_M2: | |
610 | case CCI5xx_PORT_M3: | |
611 | case CCI5xx_PORT_M4: | |
612 | case CCI5xx_PORT_M5: | |
613 | case CCI5xx_PORT_M6: | |
614 | if_type = CCI_IF_MASTER; | |
615 | break; | |
616 | case CCI5xx_PORT_GLOBAL: | |
617 | if_type = CCI_IF_GLOBAL; | |
618 | break; | |
619 | default: | |
620 | return -ENOENT; | |
621 | } | |
622 | ||
623 | if (ev_code >= cci_pmu->model->event_ranges[if_type].min && | |
624 | ev_code <= cci_pmu->model->event_ranges[if_type].max) | |
625 | return hw_event; | |
626 | ||
627 | return -ENOENT; | |
628 | } | |
629 | ||
630 | #endif /* CONFIG_ARM_CCI5xx_PMU */ | |
631 | ||
632 | /* | |
633 | * Program the CCI PMU counters which have PERF_HES_ARCH set | |
634 | * with the event period and mark them ready before we enable | |
635 | * PMU. | |
636 | */ | |
637 | static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu) | |
638 | { | |
639 | int i; | |
640 | struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events; | |
641 | DECLARE_BITMAP(mask, HW_CNTRS_MAX);
642 |
643 | bitmap_zero(mask, HW_CNTRS_MAX);
644 | for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) { |
645 | struct perf_event *event = cci_hw->events[i]; | |
646 | ||
647 | if (WARN_ON(!event)) | |
648 | continue; | |
649 | ||
650 | /* Leave the events which are not counting */ | |
651 | if (event->hw.state & PERF_HES_STOPPED) | |
652 | continue; | |
653 | if (event->hw.state & PERF_HES_ARCH) { | |
654 | __set_bit(i, mask);
655 | event->hw.state &= ~PERF_HES_ARCH; |
656 | } | |
657 | } | |
658 | ||
659 | pmu_write_counters(cci_pmu, mask); | |
660 | } | |
661 | ||
662 | /* Should be called with cci_pmu->hw_events->pmu_lock held */ | |
663 | static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu) | |
664 | { | |
665 | u32 val; | |
666 | ||
667 | /* Enable all the PMU counters. */ | |
668 | val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) | CCI_PMCR_CEN; |
669 | writel(val, cci_pmu->ctrl_base + CCI_PMCR); | |
670 | } |
671 | ||
672 | /* Should be called with cci_pmu->hw_events->pmu_lock held */ | |
673 | static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu) | |
674 | { | |
675 | cci_pmu_sync_counters(cci_pmu); | |
676 | __cci_pmu_enable_nosync(cci_pmu); | |
677 | } | |
678 | ||
679 | /* Should be called with cci_pmu->hw_events->pmu_lock held */ | |
680 | static void __cci_pmu_disable(struct cci_pmu *cci_pmu)
681 | { |
682 | u32 val; | |
683 | ||
684 | /* Disable all the PMU counters. */ | |
685 | val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN; |
686 | writel(val, cci_pmu->ctrl_base + CCI_PMCR); | |
687 | } |
688 | ||
689 | static ssize_t cci_pmu_event_show(struct device *dev, |
690 | struct device_attribute *attr, char *buf) | |
691 | { | |
692 | struct dev_ext_attribute *eattr = container_of(attr, | |
693 | struct dev_ext_attribute, attr); | |
694 | /* source parameter is mandatory for normal PMU events */ | |
695 | return sysfs_emit(buf, "source=?,event=0x%lx\n", |
696 | (unsigned long)eattr->var); | |
697 | } |
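/*
 * Usage sketch (assuming a CCI-400 registered under the model name "CCI_400"):
 * the "source" and "event" format strings above map onto the perf tool's
 * event syntax, e.g.
 *   perf stat -a -e CCI_400/source=0x0,event=0x0/ sleep 1
 * would count si_rrq_hs_any on slave interface S0 system-wide.
 */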
698 | ||
699 | static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx) | |
700 | { | |
701 | return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu); | |
702 | } | |
703 | ||
704 | static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset) | |
705 | { | |
706 | return readl_relaxed(cci_pmu->base + | |
707 | CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); | |
708 | } | |
709 | ||
710 | static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value, | |
711 | int idx, unsigned int offset) | |
712 | { | |
713 | writel_relaxed(value, cci_pmu->base + | |
714 | CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); | |
715 | } | |
716 | ||
717 | static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx) | |
718 | { | |
719 | pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL); | |
720 | } | |
721 | ||
722 | static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx) | |
723 | { | |
724 | pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL); | |
725 | } | |
726 | ||
727 | static bool __maybe_unused | |
728 | pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx) | |
729 | { | |
730 | return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0; | |
731 | } | |
732 | ||
733 | static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event) | |
734 | { | |
735 | pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL); | |
736 | } | |
737 | ||
738 | /* | |
739 | * For all counters on the CCI-PMU, disable any 'enabled' counters, | |
740 | * saving the changed counters in the mask, so that we can restore | |
741 | * it later using pmu_restore_counters. The mask is private to the | |
742 | * caller. We cannot rely on the used_mask maintained by the CCI_PMU | |
743 | * as it only tells us if the counter is assigned to perf_event or not. | |
744 | * The state of the perf_event cannot be locked by the PMU layer, hence | |
745 | * we check the individual counter status (which can be locked by | |
746 | * cci_pmu->hw_events->pmu_lock).
747 | * | |
748 | * @mask should be initialised to empty by the caller. | |
749 | */ | |
750 | static void __maybe_unused | |
751 | pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask) | |
752 | { | |
753 | int i; | |
754 | ||
755 | for (i = 0; i < cci_pmu->num_cntrs; i++) { | |
756 | if (pmu_counter_is_enabled(cci_pmu, i)) { | |
757 | set_bit(i, mask); | |
758 | pmu_disable_counter(cci_pmu, i); | |
759 | } | |
760 | } | |
761 | } | |
762 | ||
763 | /* | |
764 | * Restore the status of the counters. Reversal of the pmu_save_counters(). | |
765 | * For each counter set in the mask, enable the counter back. | |
766 | */ | |
767 | static void __maybe_unused | |
768 | pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask) | |
769 | { | |
770 | int i; | |
771 | ||
772 | for_each_set_bit(i, mask, cci_pmu->num_cntrs) | |
773 | pmu_enable_counter(cci_pmu, i); | |
774 | } | |
775 | ||
776 | /* | |
777 | * Returns the number of programmable counters actually implemented | |
778 | * by the cci | |
779 | */ | |
780 | static u32 pmu_get_max_counters(struct cci_pmu *cci_pmu)
781 | {
782 | return (readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) &
783 | CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT; |
784 | } | |
785 | ||
786 | static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event) | |
787 | { | |
788 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
789 | unsigned long cci_event = event->hw.config_base; | |
790 | int idx; | |
791 | ||
792 | if (cci_pmu->model->get_event_idx) | |
793 | return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event); | |
794 | ||
795 | /* Generic code to find an unused idx from the mask */ | |
796 | for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
797 | if (!test_and_set_bit(idx, hw->used_mask)) |
798 | return idx; | |
799 | ||
800 | /* No counters available */ | |
801 | return -EAGAIN; | |
802 | } | |
803 | ||
804 | static int pmu_map_event(struct perf_event *event) | |
805 | { | |
806 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
807 | ||
808 | if (event->attr.type < PERF_TYPE_MAX || | |
809 | !cci_pmu->model->validate_hw_event) | |
810 | return -ENOENT; | |
811 | ||
812 | return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config); | |
813 | } | |
814 | ||
815 | static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler) | |
816 | { | |
817 | int i; | |
818 | struct platform_device *pmu_device = cci_pmu->plat_device; | |
819 | ||
820 | if (unlikely(!pmu_device)) | |
821 | return -ENODEV; | |
822 | ||
823 | if (cci_pmu->nr_irqs < 1) { | |
824 | dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n"); | |
825 | return -ENODEV; | |
826 | } | |
827 | ||
828 | /* | |
829 | * Register all available CCI PMU interrupts. In the interrupt handler | |
830 | * we iterate over the counters checking for interrupt source (the | |
831 | * overflowing counter) and clear it. | |
832 | * | |
833 | * This should allow handling of non-unique interrupt for the counters. | |
834 | */ | |
835 | for (i = 0; i < cci_pmu->nr_irqs; i++) { | |
836 | int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED, | |
837 | "arm-cci-pmu", cci_pmu); | |
838 | if (err) { | |
839 | dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n", | |
840 | cci_pmu->irqs[i]); | |
841 | return err; | |
842 | } | |
843 | ||
844 | set_bit(i, &cci_pmu->active_irqs); | |
845 | } | |
846 | ||
847 | return 0; | |
848 | } | |
849 | ||
850 | static void pmu_free_irq(struct cci_pmu *cci_pmu) | |
851 | { | |
852 | int i; | |
853 | ||
854 | for (i = 0; i < cci_pmu->nr_irqs; i++) { | |
855 | if (!test_and_clear_bit(i, &cci_pmu->active_irqs)) | |
856 | continue; | |
857 | ||
858 | free_irq(cci_pmu->irqs[i], cci_pmu); | |
859 | } | |
860 | } | |
861 | ||
862 | static u32 pmu_read_counter(struct perf_event *event) | |
863 | { | |
864 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
865 | struct hw_perf_event *hw_counter = &event->hw; | |
866 | int idx = hw_counter->idx; | |
867 | u32 value; | |
868 | ||
869 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { | |
870 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | |
871 | return 0; | |
872 | } | |
873 | value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR); | |
874 | ||
875 | return value; | |
876 | } | |
877 | ||
878 | static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx) | |
879 | { | |
880 | pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR); | |
881 | } | |
882 | ||
883 | static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask) | |
884 | { | |
885 | int i; | |
886 | struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events; | |
887 | ||
888 | for_each_set_bit(i, mask, cci_pmu->num_cntrs) { | |
889 | struct perf_event *event = cci_hw->events[i]; | |
890 | ||
891 | if (WARN_ON(!event)) | |
892 | continue; | |
893 | pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i); | |
894 | } | |
895 | } | |
896 | ||
897 | static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask) | |
898 | { | |
899 | if (cci_pmu->model->write_counters) | |
900 | cci_pmu->model->write_counters(cci_pmu, mask); | |
901 | else | |
902 | __pmu_write_counters(cci_pmu, mask); | |
903 | } | |
904 | ||
905 | #ifdef CONFIG_ARM_CCI5xx_PMU | |
906 | ||
907 | /* | |
908 | * CCI-500/CCI-550 has advanced power saving policies, which could gate the | |
909 | * clocks to the PMU counters, which makes the writes to them ineffective. | |
910 | * The only way to write to those counters is when the global counters | |
911 | * are enabled and the particular counter is enabled. | |
912 | * | |
913 | * So we do the following : | |
914 | * | |
915 | * 1) Disable all the PMU counters, saving their current state | |
916 | * 2) Enable the global PMU profiling, now that all counters are | |
917 | * disabled. | |
918 | * | |
919 | * For each counter to be programmed, repeat steps 3-7: | |
920 | * | |
921 | * 3) Write an invalid event code to the event control register for the | |
922 | * counter, so that the counters are not modified.
923 | * 4) Enable the counter control for the counter. | |
924 | * 5) Set the counter value | |
925 | * 6) Disable the counter | |
926 | * 7) Restore the event in the target counter | |
927 | * | |
928 | * 8) Disable the global PMU. | |
929 | * 9) Restore the status of the rest of the counters. | |
930 | * | |
931 | * We choose an event which for CCI-5xx is guaranteed not to count. | |
932 | * We use the highest possible event code (0x1f) for the master interface 0. | |
933 | */ | |
934 | #define CCI5xx_INVALID_EVENT ((CCI5xx_PORT_M0 << CCI5xx_PMU_EVENT_SOURCE_SHIFT) | \ | |
935 | (CCI5xx_PMU_EVENT_CODE_MASK << CCI5xx_PMU_EVENT_CODE_SHIFT)) | |
936 | static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask) | |
937 | { | |
938 | int i; | |
939 | DECLARE_BITMAP(saved_mask, HW_CNTRS_MAX);
940 | |
941 | bitmap_zero(saved_mask, cci_pmu->num_cntrs); | |
942 | pmu_save_counters(cci_pmu, saved_mask); | |
943 | ||
944 | /* | |
945 | * Now that all the counters are disabled, we can safely turn the PMU on, | |
946 | * without syncing the status of the counters | |
947 | */ | |
948 | __cci_pmu_enable_nosync(cci_pmu); | |
949 | ||
950 | for_each_set_bit(i, mask, cci_pmu->num_cntrs) { | |
951 | struct perf_event *event = cci_pmu->hw_events.events[i]; | |
952 | ||
953 | if (WARN_ON(!event)) | |
954 | continue; | |
955 | ||
956 | pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT); | |
957 | pmu_enable_counter(cci_pmu, i); | |
958 | pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i); | |
959 | pmu_disable_counter(cci_pmu, i); | |
960 | pmu_set_event(cci_pmu, i, event->hw.config_base); | |
961 | } | |
962 | ||
963 | __cci_pmu_disable(cci_pmu);
964 | |
965 | pmu_restore_counters(cci_pmu, saved_mask); | |
966 | } | |
967 | ||
968 | #endif /* CONFIG_ARM_CCI5xx_PMU */ | |
969 | ||
970 | static u64 pmu_event_update(struct perf_event *event) | |
971 | { | |
972 | struct hw_perf_event *hwc = &event->hw; | |
973 | u64 delta, prev_raw_count, new_raw_count; | |
974 | ||
975 | do { | |
976 | prev_raw_count = local64_read(&hwc->prev_count); | |
977 | new_raw_count = pmu_read_counter(event); | |
978 | } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, | |
979 | new_raw_count) != prev_raw_count); | |
980 | ||
981 | delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK; | |
982 | ||
983 | local64_add(delta, &event->count); | |
984 | ||
985 | return new_raw_count; | |
986 | } | |
987 | ||
988 | static void pmu_read(struct perf_event *event) | |
989 | { | |
990 | pmu_event_update(event); | |
991 | } | |
992 | ||
993 | static void pmu_event_set_period(struct perf_event *event) | |
994 | { | |
995 | struct hw_perf_event *hwc = &event->hw; | |
996 | /* | |
997 | * The CCI PMU counters have a period of 2^32. To account for the | |
998 | * possibility of extreme interrupt latency we program for a period of
999 | * half that. Hopefully we can handle the interrupt before another 2^31 | |
1000 | * events occur and the counter overtakes its previous value. | |
1001 | */ | |
1002 | u64 val = 1ULL << 31; | |
1003 | local64_set(&hwc->prev_count, val); | |
1004 | ||
1005 | /* | |
1006 | * CCI PMU uses PERF_HES_ARCH to keep track of the counters, whose | |
1007 | * values needs to be sync-ed with the s/w state before the PMU is | |
1008 | * enabled. | |
1009 | * Mark this counter for sync. | |
1010 | */ | |
1011 | hwc->state |= PERF_HES_ARCH; | |
1012 | } | |
1013 | ||
1014 | static irqreturn_t pmu_handle_irq(int irq_num, void *dev) | |
1015 | { | |
1016 | struct cci_pmu *cci_pmu = dev; |
1017 | struct cci_pmu_hw_events *events = &cci_pmu->hw_events; | |
1018 | int idx, handled = IRQ_NONE; | |
1019 | ||
1020 | raw_spin_lock(&events->pmu_lock);
1021 | |
1022 | /* Disable the PMU while we walk through the counters */ | |
1023 | __cci_pmu_disable(cci_pmu);
1024 | /* |
1025 | * Iterate over counters and update the corresponding perf events. | |
1026 | * This should work regardless of whether we have per-counter overflow | |
1027 | * interrupt or a combined overflow interrupt. | |
1028 | */ | |
1029 | for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) { | |
1030 | struct perf_event *event = events->events[idx]; | |
1031 | ||
1032 | if (!event) | |
1033 | continue; | |
1034 | ||
1035 | /* Did this counter overflow? */ | |
1036 | if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) & | |
1037 | CCI_PMU_OVRFLW_FLAG)) | |
1038 | continue; | |
1039 | ||
1040 | pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx, | |
1041 | CCI_PMU_OVRFLW); | |
1042 | ||
1043 | pmu_event_update(event); | |
1044 | pmu_event_set_period(event); | |
1045 | handled = IRQ_HANDLED; | |
1046 | } | |
1047 | ||
1048 | /* Enable the PMU and sync possibly overflowed counters */ | |
1049 | __cci_pmu_enable_sync(cci_pmu); | |
1050 | raw_spin_unlock(&events->pmu_lock);
1051 | |
1052 | return IRQ_RETVAL(handled); | |
1053 | } | |
1054 | ||
1055 | static int cci_pmu_get_hw(struct cci_pmu *cci_pmu) | |
1056 | { | |
1057 | int ret = pmu_request_irq(cci_pmu, pmu_handle_irq); | |
1058 | if (ret) { | |
1059 | pmu_free_irq(cci_pmu); | |
1060 | return ret; | |
1061 | } | |
1062 | return 0; | |
1063 | } | |
1064 | ||
1065 | static void cci_pmu_put_hw(struct cci_pmu *cci_pmu) | |
1066 | { | |
1067 | pmu_free_irq(cci_pmu); | |
1068 | } | |
1069 | ||
1070 | static void hw_perf_event_destroy(struct perf_event *event) | |
1071 | { | |
1072 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
1073 | atomic_t *active_events = &cci_pmu->active_events; | |
1074 | struct mutex *reserve_mutex = &cci_pmu->reserve_mutex; | |
1075 | ||
1076 | if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) { | |
1077 | cci_pmu_put_hw(cci_pmu); | |
1078 | mutex_unlock(reserve_mutex); | |
1079 | } | |
1080 | } | |
1081 | ||
1082 | static void cci_pmu_enable(struct pmu *pmu) | |
1083 | { | |
1084 | struct cci_pmu *cci_pmu = to_cci_pmu(pmu); | |
1085 | struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; | |
1086 | bool enabled = !bitmap_empty(hw_events->used_mask, cci_pmu->num_cntrs);
1087 | unsigned long flags; |
1088 | ||
1089 | if (!enabled) | |
1090 | return; | |
1091 | ||
1092 | raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); | |
1093 | __cci_pmu_enable_sync(cci_pmu); | |
1094 | raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); | |
1095 | ||
1096 | } | |
1097 | ||
1098 | static void cci_pmu_disable(struct pmu *pmu) | |
1099 | { | |
1100 | struct cci_pmu *cci_pmu = to_cci_pmu(pmu); | |
1101 | struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; | |
1102 | unsigned long flags; | |
1103 | ||
1104 | raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); | |
1105 | __cci_pmu_disable(cci_pmu);
1106 | raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); |
1107 | } | |
1108 | ||
1109 | /* | |
1110 | * Check if the idx represents a non-programmable counter. | |
1111 | * All the fixed event counters are mapped before the programmable | |
1112 | * counters. | |
1113 | */ | |
1114 | static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx) | |
1115 | { | |
1116 | return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs); | |
1117 | } | |
1118 | ||
1119 | static void cci_pmu_start(struct perf_event *event, int pmu_flags) | |
1120 | { | |
1121 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
1122 | struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; | |
1123 | struct hw_perf_event *hwc = &event->hw; | |
1124 | int idx = hwc->idx; | |
1125 | unsigned long flags; | |
1126 | ||
1127 | /* | |
1128 | * To handle interrupt latency, we always reprogram the period | |
1129 | * regardless of PERF_EF_RELOAD.
1130 | */ |
1131 | if (pmu_flags & PERF_EF_RELOAD) | |
1132 | WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); | |
1133 | ||
1134 | hwc->state = 0; | |
1135 | ||
1136 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { | |
1137 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | |
1138 | return; | |
1139 | } | |
1140 | ||
1141 | raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); | |
1142 | ||
1143 | /* Configure the counter unless you are counting a fixed event */ | |
1144 | if (!pmu_fixed_hw_idx(cci_pmu, idx)) | |
1145 | pmu_set_event(cci_pmu, idx, hwc->config_base); | |
1146 | ||
1147 | pmu_event_set_period(event); | |
1148 | pmu_enable_counter(cci_pmu, idx); | |
1149 | ||
1150 | raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); | |
1151 | } | |
1152 | ||
1153 | static void cci_pmu_stop(struct perf_event *event, int pmu_flags) | |
1154 | { | |
1155 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
1156 | struct hw_perf_event *hwc = &event->hw; | |
1157 | int idx = hwc->idx; | |
1158 | ||
1159 | if (hwc->state & PERF_HES_STOPPED) | |
1160 | return; | |
1161 | ||
1162 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { | |
1163 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | |
1164 | return; | |
1165 | } | |
1166 | ||
1167 | /* | |
1168 | * We always reprogram the counter, so ignore PERF_EF_UPDATE. See | |
1169 | * cci_pmu_start() | |
1170 | */ | |
1171 | pmu_disable_counter(cci_pmu, idx); | |
1172 | pmu_event_update(event); | |
1173 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | |
1174 | } | |
1175 | ||
1176 | static int cci_pmu_add(struct perf_event *event, int flags) | |
1177 | { | |
1178 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
1179 | struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; | |
1180 | struct hw_perf_event *hwc = &event->hw; | |
1181 | int idx; | |
1182 | |
1183 | /* If we don't have a space for the counter then finish early. */ | |
1184 | idx = pmu_get_event_idx(hw_events, event); | |
1185 | if (idx < 0) |
1186 | return idx; | |
1187 | |
1188 | event->hw.idx = idx; | |
1189 | hw_events->events[idx] = event; | |
1190 | ||
1191 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | |
1192 | if (flags & PERF_EF_START) | |
1193 | cci_pmu_start(event, PERF_EF_RELOAD); | |
1194 | ||
1195 | /* Propagate our changes to the userspace mapping. */ | |
1196 | perf_event_update_userpage(event); | |
1197 | ||
1198 | return 0;
1199 | } |
1200 | ||
1201 | static void cci_pmu_del(struct perf_event *event, int flags) | |
1202 | { | |
1203 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
1204 | struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; | |
1205 | struct hw_perf_event *hwc = &event->hw; | |
1206 | int idx = hwc->idx; | |
1207 | ||
1208 | cci_pmu_stop(event, PERF_EF_UPDATE); | |
1209 | hw_events->events[idx] = NULL; | |
1210 | clear_bit(idx, hw_events->used_mask); | |
1211 | ||
1212 | perf_event_update_userpage(event); | |
1213 | } | |
1214 | ||
1215 | static int validate_event(struct pmu *cci_pmu, | |
1216 | struct cci_pmu_hw_events *hw_events, | |
1217 | struct perf_event *event) | |
1218 | { | |
1219 | if (is_software_event(event)) | |
1220 | return 1; | |
1221 | ||
1222 | /* | |
1223 | * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The | |
1224 | * core perf code won't check that the pmu->ctx == leader->ctx | |
1225 | * until after pmu->event_init(event). | |
1226 | */ | |
1227 | if (event->pmu != cci_pmu) | |
1228 | return 0; | |
1229 | ||
1230 | if (event->state < PERF_EVENT_STATE_OFF) | |
1231 | return 1; | |
1232 | ||
1233 | if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) | |
1234 | return 1; | |
1235 | ||
1236 | return pmu_get_event_idx(hw_events, event) >= 0; | |
1237 | } | |
1238 | ||
1239 | static int validate_group(struct perf_event *event) | |
1240 | { | |
1241 | struct perf_event *sibling, *leader = event->group_leader; | |
1242 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
1243 | unsigned long mask[BITS_TO_LONGS(HW_CNTRS_MAX)];
1244 | struct cci_pmu_hw_events fake_pmu = { |
1245 | /* | |
1246 | * Initialise the fake PMU. We only need to populate the | |
1247 | * used_mask for the purposes of validation. | |
1248 | */ | |
1249 | .used_mask = mask, | |
1250 | }; | |
1251 | bitmap_zero(mask, cci_pmu->num_cntrs);
1252 | |
1253 | if (!validate_event(event->pmu, &fake_pmu, leader)) | |
1254 | return -EINVAL; | |
1255 | ||
1256 | for_each_sibling_event(sibling, leader) {
1257 | if (!validate_event(event->pmu, &fake_pmu, sibling)) |
1258 | return -EINVAL; | |
1259 | } | |
1260 | ||
1261 | if (!validate_event(event->pmu, &fake_pmu, event)) | |
1262 | return -EINVAL; | |
1263 | ||
1264 | return 0; | |
1265 | } | |
1266 | ||
1267 | static int __hw_perf_event_init(struct perf_event *event) | |
1268 | { | |
1269 | struct hw_perf_event *hwc = &event->hw; | |
1270 | int mapping; | |
1271 | ||
1272 | mapping = pmu_map_event(event); | |
1273 | ||
1274 | if (mapping < 0) { | |
1275 | pr_debug("event %x:%llx not supported\n", event->attr.type, | |
1276 | event->attr.config); | |
1277 | return mapping; | |
1278 | } | |
1279 | ||
1280 | /* | |
1281 | * We don't assign an index until we actually place the event onto | |
1282 | * hardware. Use -1 to signify that we haven't decided where to put it | |
1283 | * yet. | |
1284 | */ | |
1285 | hwc->idx = -1; | |
1286 | hwc->config_base = 0; | |
1287 | hwc->config = 0; | |
1288 | hwc->event_base = 0; | |
1289 | ||
1290 | /* | |
1291 | * Store the event encoding into the config_base field. | |
1292 | */ | |
1293 | hwc->config_base |= (unsigned long)mapping; | |
1294 | ||
1295 | if (event->group_leader != event) { |
1296 | if (validate_group(event) != 0) | |
1297 | return -EINVAL; | |
1298 | } | |
1299 | ||
1300 | return 0; | |
1301 | } | |
1302 | ||
1303 | static int cci_pmu_event_init(struct perf_event *event) | |
1304 | { | |
1305 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
1306 | atomic_t *active_events = &cci_pmu->active_events; | |
1307 | int err = 0; | |
1308 | |
1309 | if (event->attr.type != event->pmu->type) | |
1310 | return -ENOENT; | |
1311 | ||
1312 | /* Shared by all CPUs, no meaningful state to sample */ | |
1313 | if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) | |
1314 | return -EOPNOTSUPP; | |
1315 | ||
1316 | /* |
1317 | * Following the example set by other "uncore" PMUs, we accept any CPU | |
1318 | * and rewrite its affinity dynamically rather than having perf core | |
1319 | * handle cpu == -1 and pid == -1 for this case. | |
1320 | * | |
1321 | * The perf core will pin online CPUs for the duration of this call and | |
1322 | * the event being installed into its context, so the PMU's CPU can't | |
1323 | * change under our feet. | |
1324 | */ | |
1325 | if (event->cpu < 0)
1326 | return -EINVAL;
1327 | event->cpu = cci_pmu->cpu;
1328 | |
1329 | event->destroy = hw_perf_event_destroy; | |
1330 | if (!atomic_inc_not_zero(active_events)) { | |
1331 | mutex_lock(&cci_pmu->reserve_mutex); | |
1332 | if (atomic_read(active_events) == 0) | |
1333 | err = cci_pmu_get_hw(cci_pmu); | |
1334 | if (!err) | |
1335 | atomic_inc(active_events); | |
1336 | mutex_unlock(&cci_pmu->reserve_mutex); | |
1337 | } | |
1338 | if (err) | |
1339 | return err; | |
1340 | ||
1341 | err = __hw_perf_event_init(event); | |
1342 | if (err) | |
1343 | hw_perf_event_destroy(event); | |
1344 | ||
1345 | return err; | |
1346 | } | |
1347 | ||
1348 | static ssize_t pmu_cpumask_attr_show(struct device *dev, | |
1349 | struct device_attribute *attr, char *buf) | |
1350 | { | |
1351 | struct pmu *pmu = dev_get_drvdata(dev); | |
1352 | struct cci_pmu *cci_pmu = to_cci_pmu(pmu); | |
1353 | ||
1354 | return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu));
1355 | } |
1356 | ||
1357 | static struct device_attribute pmu_cpumask_attr = | |
1358 | __ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL); | |
1359 | ||
1360 | static struct attribute *pmu_attrs[] = { | |
1361 | &pmu_cpumask_attr.attr, | |
1362 | NULL, | |
1363 | }; | |
1364 | ||
1365 | static const struct attribute_group pmu_attr_group = {
1366 | .attrs = pmu_attrs, |
1367 | }; | |
1368 | ||
1369 | static struct attribute_group pmu_format_attr_group = { | |
1370 | .name = "format", | |
1371 | .attrs = NULL, /* Filled in cci_pmu_init_attrs */ | |
1372 | }; | |
1373 | ||
1374 | static struct attribute_group pmu_event_attr_group = { | |
1375 | .name = "events", | |
1376 | .attrs = NULL, /* Filled in cci_pmu_init_attrs */ | |
1377 | }; | |
1378 | ||
1379 | static const struct attribute_group *pmu_attr_groups[] = { | |
1380 | &pmu_attr_group, | |
1381 | &pmu_format_attr_group, | |
1382 | &pmu_event_attr_group, | |
1383 | NULL | |
1384 | }; | |
1385 | ||
1386 | static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) | |
1387 | { | |
1388 | const struct cci_pmu_model *model = cci_pmu->model; | |
1389 | char *name = model->name; | |
1390 | u32 num_cntrs; | |
1391 | ||
1392 | if (WARN_ON(model->num_hw_cntrs > NUM_HW_CNTRS_MAX)) |
1393 | return -EINVAL; | |
1394 | if (WARN_ON(model->fixed_hw_cntrs > FIXED_HW_CNTRS_MAX)) | |
1395 | return -EINVAL; | |
1396 | ||
1397 | pmu_event_attr_group.attrs = model->event_attrs; |
1398 | pmu_format_attr_group.attrs = model->format_attrs; | |
1399 | ||
1400 | cci_pmu->pmu = (struct pmu) { | |
1401 | .module = THIS_MODULE,
1402 | .parent = &pdev->dev,
1403 | .name = cci_pmu->model->name, |
1404 | .task_ctx_nr = perf_invalid_context, | |
1405 | .pmu_enable = cci_pmu_enable, | |
1406 | .pmu_disable = cci_pmu_disable, | |
1407 | .event_init = cci_pmu_event_init, | |
1408 | .add = cci_pmu_add, | |
1409 | .del = cci_pmu_del, | |
1410 | .start = cci_pmu_start, | |
1411 | .stop = cci_pmu_stop, | |
1412 | .read = pmu_read, | |
1413 | .attr_groups = pmu_attr_groups, | |
1414 | .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
1415 | }; |
1416 | ||
1417 | cci_pmu->plat_device = pdev; | |
1418 | num_cntrs = pmu_get_max_counters(cci_pmu);
1419 | if (num_cntrs > cci_pmu->model->num_hw_cntrs) { |
1420 | dev_warn(&pdev->dev, | |
1421 | "PMU implements more counters(%d) than supported by" | |
1422 | " the model(%d), truncated.", | |
1423 | num_cntrs, cci_pmu->model->num_hw_cntrs); | |
1424 | num_cntrs = cci_pmu->model->num_hw_cntrs; | |
1425 | } | |
1426 | cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs; | |
1427 | ||
1428 | return perf_pmu_register(&cci_pmu->pmu, name, -1); | |
1429 | } | |
1430 | ||
1431 | static int cci_pmu_offline_cpu(unsigned int cpu)
1432 | {
1433 | int target;
1434 |
1435 | if (!g_cci_pmu || cpu != g_cci_pmu->cpu)
1436 | return 0;
1437 |
1438 | target = cpumask_any_but(cpu_online_mask, cpu); |
1439 | if (target >= nr_cpu_ids) | |
1440 | return 0; | |
1441 | |
1442 | perf_pmu_migrate_context(&g_cci_pmu->pmu, cpu, target); | |
1443 | g_cci_pmu->cpu = target; | |
1444 | return 0; |
1445 | } | |
1446 | ||
1447 | static __maybe_unused struct cci_pmu_model cci_pmu_models[] = {
1448 | #ifdef CONFIG_ARM_CCI400_PMU |
1449 | [CCI400_R0] = { | |
1450 | .name = "CCI_400", | |
1451 | .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */ |
1452 | .num_hw_cntrs = NUM_HW_CNTRS_CII_4XX, | |
1453 | .cntr_size = SZ_4K, |
1454 | .format_attrs = cci400_pmu_format_attrs, | |
1455 | .event_attrs = cci400_r0_pmu_event_attrs, | |
1456 | .event_ranges = { | |
1457 | [CCI_IF_SLAVE] = { | |
1458 | CCI400_R0_SLAVE_PORT_MIN_EV, | |
1459 | CCI400_R0_SLAVE_PORT_MAX_EV, | |
1460 | }, | |
1461 | [CCI_IF_MASTER] = { | |
1462 | CCI400_R0_MASTER_PORT_MIN_EV, | |
1463 | CCI400_R0_MASTER_PORT_MAX_EV, | |
1464 | }, | |
1465 | }, | |
1466 | .validate_hw_event = cci400_validate_hw_event, | |
1467 | .get_event_idx = cci400_get_event_idx, | |
1468 | }, | |
1469 | [CCI400_R1] = { | |
1470 | .name = "CCI_400_r1", | |
1201a5a2 KC |
1471 | .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */ |
1472 | .num_hw_cntrs = NUM_HW_CNTRS_CII_4XX, | |
3de6be7a RM |
1473 | .cntr_size = SZ_4K, |
1474 | .format_attrs = cci400_pmu_format_attrs, | |
1475 | .event_attrs = cci400_r1_pmu_event_attrs, | |
1476 | .event_ranges = { | |
1477 | [CCI_IF_SLAVE] = { | |
1478 | CCI400_R1_SLAVE_PORT_MIN_EV, | |
1479 | CCI400_R1_SLAVE_PORT_MAX_EV, | |
1480 | }, | |
1481 | [CCI_IF_MASTER] = { | |
1482 | CCI400_R1_MASTER_PORT_MIN_EV, | |
1483 | CCI400_R1_MASTER_PORT_MAX_EV, | |
1484 | }, | |
1485 | }, | |
1486 | .validate_hw_event = cci400_validate_hw_event, | |
1487 | .get_event_idx = cci400_get_event_idx, | |
1488 | }, | |
1489 | #endif | |
1490 | #ifdef CONFIG_ARM_CCI5xx_PMU | |
1491 | [CCI500_R0] = { | |
1492 | .name = "CCI_500", | |
1201a5a2 KC |
1493 | .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX, |
1494 | .num_hw_cntrs = NUM_HW_CNTRS_CII_5XX, | |
3de6be7a RM |
1495 | .cntr_size = SZ_64K, |
1496 | .format_attrs = cci5xx_pmu_format_attrs, | |
1497 | .event_attrs = cci5xx_pmu_event_attrs, | |
1498 | .event_ranges = { | |
1499 | [CCI_IF_SLAVE] = { | |
1500 | CCI5xx_SLAVE_PORT_MIN_EV, | |
1501 | CCI5xx_SLAVE_PORT_MAX_EV, | |
1502 | }, | |
1503 | [CCI_IF_MASTER] = { | |
1504 | CCI5xx_MASTER_PORT_MIN_EV, | |
1505 | CCI5xx_MASTER_PORT_MAX_EV, | |
1506 | }, | |
1507 | [CCI_IF_GLOBAL] = { | |
1508 | CCI5xx_GLOBAL_PORT_MIN_EV, | |
1509 | CCI5xx_GLOBAL_PORT_MAX_EV, | |
1510 | }, | |
1511 | }, | |
1512 | .validate_hw_event = cci500_validate_hw_event, | |
1513 | .write_counters = cci5xx_pmu_write_counters, | |
1514 | }, | |
1515 | [CCI550_R0] = { | |
1516 | .name = "CCI_550", | |
1201a5a2 KC |
1517 | .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX, |
1518 | .num_hw_cntrs = NUM_HW_CNTRS_CII_5XX, | |
3de6be7a RM |
1519 | .cntr_size = SZ_64K, |
1520 | .format_attrs = cci5xx_pmu_format_attrs, | |
1521 | .event_attrs = cci5xx_pmu_event_attrs, | |
1522 | .event_ranges = { | |
1523 | [CCI_IF_SLAVE] = { | |
1524 | CCI5xx_SLAVE_PORT_MIN_EV, | |
1525 | CCI5xx_SLAVE_PORT_MAX_EV, | |
1526 | }, | |
1527 | [CCI_IF_MASTER] = { | |
1528 | CCI5xx_MASTER_PORT_MIN_EV, | |
1529 | CCI5xx_MASTER_PORT_MAX_EV, | |
1530 | }, | |
1531 | [CCI_IF_GLOBAL] = { | |
1532 | CCI5xx_GLOBAL_PORT_MIN_EV, | |
1533 | CCI5xx_GLOBAL_PORT_MAX_EV, | |
1534 | }, | |
1535 | }, | |
1536 | .validate_hw_event = cci550_validate_hw_event, | |
1537 | .write_counters = cci5xx_pmu_write_counters, | |
1538 | }, | |
1539 | #endif | |
1540 | }; | |
1541 | ||
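/*
 * The bare "arm,cci-400-pmu" compatible carries no model data; the model
 * is then probed from the CCI identification registers, which requires
 * secure access and is deprecated. The revision-specific compatibles
 * point directly at the matching cci_pmu_models[] entry.
 */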
static const struct of_device_id arm_cci_pmu_matches[] = {
#ifdef CONFIG_ARM_CCI400_PMU
	{
		.compatible = "arm,cci-400-pmu",
		.data = NULL,
	},
	{
		.compatible = "arm,cci-400-pmu,r0",
		.data = &cci_pmu_models[CCI400_R0],
	},
	{
		.compatible = "arm,cci-400-pmu,r1",
		.data = &cci_pmu_models[CCI400_R1],
	},
#endif
#ifdef CONFIG_ARM_CCI5xx_PMU
	{
		.compatible = "arm,cci-500-pmu,r0",
		.data = &cci_pmu_models[CCI500_R0],
	},
	{
		.compatible = "arm,cci-550-pmu,r0",
		.data = &cci_pmu_models[CCI550_R0],
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, arm_cci_pmu_matches);

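/* Check whether @irq already appears in the first @nr_irqs entries of @irqs. */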
static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++)
		if (irq == irqs[i])
			return true;

	return false;
}

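/*
 * Allocate and minimally initialise a cci_pmu instance: pick up the
 * control register base handed over via platform_data, resolve the PMU
 * model (from DT match data, or by probing the hardware) and size the
 * per-counter bookkeeping arrays accordingly.
 */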
static struct cci_pmu *cci_pmu_alloc(struct device *dev)
{
	struct cci_pmu *cci_pmu;
	const struct cci_pmu_model *model;

	/*
	 * All allocations are devm_*, so nothing needs to be freed
	 * explicitly on error; everything is released automatically when
	 * the driver detaches.
	 */
	cci_pmu = devm_kzalloc(dev, sizeof(*cci_pmu), GFP_KERNEL);
	if (!cci_pmu)
		return ERR_PTR(-ENOMEM);

	cci_pmu->ctrl_base = *(void __iomem **)dev->platform_data;

	model = of_device_get_match_data(dev);
	if (!model) {
		dev_warn(dev,
			 "DEPRECATED compatible property, requires secure access to CCI registers");
		model = probe_cci_model(cci_pmu);
	}
	if (!model) {
		dev_warn(dev, "CCI PMU version not supported\n");
		return ERR_PTR(-ENODEV);
	}

	cci_pmu->model = model;
	cci_pmu->irqs = devm_kcalloc(dev, CCI_PMU_MAX_HW_CNTRS(model),
				     sizeof(*cci_pmu->irqs), GFP_KERNEL);
	if (!cci_pmu->irqs)
		return ERR_PTR(-ENOMEM);
	cci_pmu->hw_events.events = devm_kcalloc(dev,
						 CCI_PMU_MAX_HW_CNTRS(model),
						 sizeof(*cci_pmu->hw_events.events),
						 GFP_KERNEL);
	if (!cci_pmu->hw_events.events)
		return ERR_PTR(-ENOMEM);
	cci_pmu->hw_events.used_mask = devm_bitmap_zalloc(dev,
							  CCI_PMU_MAX_HW_CNTRS(model),
							  GFP_KERNEL);
	if (!cci_pmu->hw_events.used_mask)
		return ERR_PTR(-ENOMEM);

	return cci_pmu;
}

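/*
 * Probe: map the PMU register space, collect the (possibly shared)
 * per-counter overflow interrupts from the device tree, bind the PMU to
 * the probing CPU, install the hotplug callback and finally register
 * the PMU with the perf core.
 */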
static int cci_pmu_probe(struct platform_device *pdev)
{
	struct cci_pmu *cci_pmu;
	int i, ret, irq;

	cci_pmu = cci_pmu_alloc(&pdev->dev);
	if (IS_ERR(cci_pmu))
		return PTR_ERR(cci_pmu);

	cci_pmu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cci_pmu->base))
		return -ENOMEM;

	/*
	 * The CCI PMU has one overflow interrupt per counter, but several
	 * counters may share a common interrupt line.
	 */
	cci_pmu->nr_irqs = 0;
	for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0)
			break;

		if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs))
			continue;

		cci_pmu->irqs[cci_pmu->nr_irqs++] = irq;
	}

	/*
	 * Ensure that the device tree provides as many interrupts as there
	 * are counters.
	 */
	if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) {
		dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n",
			 i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model));
		return -EINVAL;
	}

	raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
	mutex_init(&cci_pmu->reserve_mutex);
	atomic_set(&cci_pmu->active_events, 0);

	cci_pmu->cpu = raw_smp_processor_id();
	g_cci_pmu = cci_pmu;
	cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
				  "perf/arm/cci:online", NULL,
				  cci_pmu_offline_cpu);

	ret = cci_pmu_init(cci_pmu, pdev);
	if (ret)
		goto error_pmu_init;

	pr_info("ARM %s PMU driver probed\n", cci_pmu->model->name);
	return 0;

error_pmu_init:
	cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
	g_cci_pmu = NULL;
	return ret;
}

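/*
 * Undo probe: remove the hotplug state, unregister the PMU and drop the
 * global reference. Device-managed resources are released by the driver
 * core.
 */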
static void cci_pmu_remove(struct platform_device *pdev)
{
	if (!g_cci_pmu)
		return;

	cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
	perf_pmu_unregister(&g_cci_pmu->pmu);
	g_cci_pmu = NULL;
}

static struct platform_driver cci_pmu_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = arm_cci_pmu_matches,
		.suppress_bind_attrs = true,
	},
	.probe = cci_pmu_probe,
	.remove_new = cci_pmu_remove,
};

module_platform_driver(cci_pmu_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ARM CCI PMU support");