#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <[email protected]>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/irq_regs.h>
#include <asm/pmu.h>

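/*
 * Map a generic perf cache event onto this PMU's encoding. The cache
 * event is packed into attr.config as three bytes: cache type in bits
 * 0-7, operation in bits 8-15 and result (access/miss) in bits 16-23,
 * matching the generic PERF_TYPE_HW_CACHE encoding.
 */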
static int
armpmu_map_cache_event(const unsigned (*cache_map)
				      [PERF_COUNT_HW_CACHE_MAX]
				      [PERF_COUNT_HW_CACHE_OP_MAX]
				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		 const unsigned (*cache_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX],
		 u32 raw_event_mask)
{
	u64 config = event->attr.config;
	int type = event->attr.type;

	if (type == event->pmu->type)
		return armpmu_map_raw_event(raw_event_mask, config);

	switch (type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_hw_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

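/*
 * Program the counter so that it overflows after 'left' more events:
 * it is written with -left (truncated to the counter width) and counts
 * up towards the overflow. Returns 1 if a new sample period was
 * started, 0 otherwise.
 */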
int armpmu_event_set_period(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

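/*
 * Read the counter and fold the delta since the last read into the
 * event count. The cmpxchg loop guards against racing with a
 * concurrent update of prev_count (e.g. from the overflow interrupt),
 * and the delta is masked to the counter width via max_period.
 */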
u64 armpmu_event_update(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(event);
		armpmu_event_update(event);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event);
	armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
	if (armpmu->clear_event_idx)
		armpmu->clear_event_idx(hw_events, event);

	perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, event);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(event);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

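/*
 * An event always validates if it will never actually occupy a
 * hardware counter (software events, or events that are off and will
 * stay off); otherwise check that a counter could be allocated for it.
 */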
static int
validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	if (is_software_event(event))
		return 1;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}

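/*
 * Common IRQ entry point: dispatch to the platform-provided handler if
 * there is one, otherwise call the PMU-specific handler directly, and
 * report the time spent to core perf via perf_sample_event_took().
 */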
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu;
	struct platform_device *plat_device;
	struct arm_pmu_platdata *plat;
	int ret;
	u64 start_clock, finish_clock;

	/*
	 * We request the IRQ with a (possibly percpu) struct arm_pmu**, but
	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
	 * do any necessary shifting, we just need to perform the first
	 * dereference.
	 */
	armpmu = *(void **)dev;
	plat_device = armpmu->plat_device;
	plat = dev_get_platdata(&plat_device->dev);

	start_clock = sched_clock();
	if (plat && plat->handle_irq)
		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
	else
		ret = armpmu->handle_irq(irq, armpmu);
	finish_clock = sched_clock();

	perf_sample_event_took(finish_clock - start_clock);
	return ret;
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
	pm_runtime_put_sync(&armpmu->plat_device->dev);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	int err;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device)
		return -ENODEV;

	pm_runtime_get_sync(&pmu_device->dev);
	err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
	}

	return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support mode exclusion\n");
		return -EOPNOTSUPP;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base	|= (unsigned long)mapping;

	if (!is_sampling_event(event)) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = armpmu->max_period >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

	if (enabled)
		armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	armpmu->stop(armpmu);
}

#ifdef CONFIG_PM
static int armpmu_runtime_resume(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_resume)
		return plat->runtime_resume(dev);

	return 0;
}

static int armpmu_runtime_suspend(struct device *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(dev);

	if (plat && plat->runtime_suspend)
		return plat->runtime_suspend(dev);

	return 0;
}
#endif

const struct dev_pm_ops armpmu_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
};

static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu = (struct pmu) {
		.pmu_enable	= armpmu_enable,
		.pmu_disable	= armpmu_disable,
		.event_init	= armpmu_event_init,
		.add		= armpmu_add,
		.del		= armpmu_del,
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
	};
}

int armpmu_register(struct arm_pmu *armpmu, int type)
{
	armpmu_init(armpmu);
	pm_runtime_enable(&armpmu->plat_device->dev);
	pr_info("enabled with %s PMU driver, %d counters available\n",
		armpmu->name, armpmu->num_events);
	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}