1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Alibaba DDR Sub-System Driveway PMU driver
4  *
5  * Copyright (C) 2022 Alibaba Inc
6  */
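/*
 * Usage note (illustrative, not from the original source): the PMU registers
 * under perf as "ali_drw_<addr>", where <addr> is the controller's MMIO base
 * shifted right by ALI_DRW_PMU_PA_SHIFT (see ali_drw_pmu_probe()).  A typical
 * invocation might look like:
 *
 *   perf stat -a -e ali_drw_21000/hif_rd/ -- sleep 1
 *
 * The "21000" suffix is only an example; the real suffix is platform-specific.
 */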
7
8 #define ALI_DRW_PMUNAME         "ali_drw"
9 #define ALI_DRW_DRVNAME         ALI_DRW_PMUNAME "_pmu"
10 #define pr_fmt(fmt)             ALI_DRW_DRVNAME ": " fmt
11
12 #include <linux/acpi.h>
13 #include <linux/bitfield.h>
14 #include <linux/bitmap.h>
15 #include <linux/bitops.h>
16 #include <linux/cpuhotplug.h>
17 #include <linux/cpumask.h>
18 #include <linux/device.h>
19 #include <linux/errno.h>
20 #include <linux/interrupt.h>
21 #include <linux/irq.h>
22 #include <linux/kernel.h>
23 #include <linux/list.h>
24 #include <linux/module.h>
25 #include <linux/mutex.h>
26 #include <linux/perf_event.h>
27 #include <linux/platform_device.h>
28 #include <linux/printk.h>
29 #include <linux/rculist.h>
30 #include <linux/refcount.h>
31
32
33 #define ALI_DRW_PMU_COMMON_MAX_COUNTERS                 16
34 #define ALI_DRW_PMU_TEST_SEL_COMMON_COUNTER_BASE        19
35
36 #define ALI_DRW_PMU_PA_SHIFT                    12
37 #define ALI_DRW_PMU_CNT_INIT                    0x00000000
38 #define ALI_DRW_CNT_MAX_PERIOD                  0xffffffff
39 #define ALI_DRW_PMU_CYCLE_EVT_ID                0x80
40
41 #define ALI_DRW_PMU_CNT_CTRL                    0xC00
42 #define ALI_DRW_PMU_CNT_RST                     BIT(2)
43 #define ALI_DRW_PMU_CNT_STOP                    BIT(1)
44 #define ALI_DRW_PMU_CNT_START                   BIT(0)
45
46 #define ALI_DRW_PMU_CNT_STATE                   0xC04
47 #define ALI_DRW_PMU_TEST_CTRL                   0xC08
48 #define ALI_DRW_PMU_CNT_PRELOAD                 0xC0C
49
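/*
 * The free-running cycle counter is wider than the 32-bit common counters:
 * its upper 24 bits are read from CYCLE_CNT_HIGH and its lower 32 bits from
 * CYCLE_CNT_LOW, giving a 56-bit value (see ali_drw_pmu_read_counter()).
 */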
50 #define ALI_DRW_PMU_CYCLE_CNT_HIGH_MASK         GENMASK(23, 0)
51 #define ALI_DRW_PMU_CYCLE_CNT_LOW_MASK          GENMASK(31, 0)
52 #define ALI_DRW_PMU_CYCLE_CNT_HIGH              0xC10
53 #define ALI_DRW_PMU_CYCLE_CNT_LOW               0xC14
54
55 /* PMU EVENT SEL 0-3 are paired in 32-bit registers on a 4-byte stride */
56 #define ALI_DRW_PMU_EVENT_SEL0                  0xC68
57 /* counters 0-3 use sel0, counters 4-7 use sel1, ... */
58 #define ALI_DRW_PMU_EVENT_SELn(n) \
59         (ALI_DRW_PMU_EVENT_SEL0 + (n / 4) * 0x4)
60 #define ALI_DRW_PMCOM_CNT_EN                    BIT(7)
61 #define ALI_DRW_PMCOM_CNT_EVENT_MASK            GENMASK(5, 0)
62 #define ALI_DRW_PMCOM_CNT_EVENT_OFFSET(n) \
63         (8 * (n % 4))
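/*
 * Worked example of the two macros above: common counter 5 is programmed
 * through ALI_DRW_PMU_EVENT_SELn(5) = 0xC68 + (5 / 4) * 0x4 = 0xC6C
 * (i.e. EVENT_SEL1) and occupies byte lane 1 of that register, starting at
 * bit ALI_DRW_PMCOM_CNT_EVENT_OFFSET(5) = 8 * (5 % 4) = 8.
 */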
64
65 /* PMU COMMON COUNTER 0-15 are paired in 32-bit registers on a 4-byte stride */
66 #define ALI_DRW_PMU_COMMON_COUNTER0             0xC78
67 #define ALI_DRW_PMU_COMMON_COUNTERn(n) \
68         (ALI_DRW_PMU_COMMON_COUNTER0 + 0x4 * (n))
69
70 #define ALI_DRW_PMU_OV_INTR_ENABLE_CTL          0xCB8
71 #define ALI_DRW_PMU_OV_INTR_DISABLE_CTL         0xCBC
72 #define ALI_DRW_PMU_OV_INTR_ENABLE_STATUS       0xCC0
73 #define ALI_DRW_PMU_OV_INTR_CLR                 0xCC4
74 #define ALI_DRW_PMU_OV_INTR_STATUS              0xCC8
75 #define ALI_DRW_PMCOM_CNT_OV_INTR_MASK          GENMASK(23, 8)
76 #define ALI_DRW_PMBW_CNT_OV_INTR_MASK           GENMASK(7, 0)
77 #define ALI_DRW_PMU_OV_INTR_MASK                GENMASK_ULL(63, 0)
78
79 static int ali_drw_cpuhp_state_num;
80
81 static LIST_HEAD(ali_drw_pmu_irqs);
82 static DEFINE_MUTEX(ali_drw_pmu_irqs_lock);
83
84 struct ali_drw_pmu_irq {
85         struct hlist_node node;
86         struct list_head irqs_node;
87         struct list_head pmus_node;
88         int irq_num;
89         int cpu;
90         refcount_t refcount;
91 };
92
93 struct ali_drw_pmu {
94         void __iomem *cfg_base;
95         struct device *dev;
96
97         struct list_head pmus_node;
98         struct ali_drw_pmu_irq *irq;
99         int irq_num;
100         int cpu;
101         DECLARE_BITMAP(used_mask, ALI_DRW_PMU_COMMON_MAX_COUNTERS);
102         struct perf_event *events[ALI_DRW_PMU_COMMON_MAX_COUNTERS];
103         int evtids[ALI_DRW_PMU_COMMON_MAX_COUNTERS];
104
105         struct pmu pmu;
106 };
107
108 #define to_ali_drw_pmu(p) (container_of(p, struct ali_drw_pmu, pmu))
109
110 #define DRW_CONFIG_EVENTID              GENMASK(7, 0)
111 #define GET_DRW_EVENTID(event)  FIELD_GET(DRW_CONFIG_EVENTID, (event)->attr.config)
112
113 static ssize_t ali_drw_pmu_format_show(struct device *dev,
114                                 struct device_attribute *attr, char *buf)
115 {
116         struct dev_ext_attribute *eattr;
117
118         eattr = container_of(attr, struct dev_ext_attribute, attr);
119
120         return sprintf(buf, "%s\n", (char *)eattr->var);
121 }
122
123 /*
124  * PMU event attributes
125  */
126 static ssize_t ali_drw_pmu_event_show(struct device *dev,
127                                struct device_attribute *attr, char *page)
128 {
129         struct dev_ext_attribute *eattr;
130
131         eattr = container_of(attr, struct dev_ext_attribute, attr);
132
133         return sprintf(page, "config=0x%lx\n", (unsigned long)eattr->var);
134 }
135
136 #define ALI_DRW_PMU_ATTR(_name, _func, _config)                            \
137                 (&((struct dev_ext_attribute[]) {                               \
138                                 { __ATTR(_name, 0444, _func, NULL), (void *)_config }   \
139                 })[0].attr.attr)
140
141 #define ALI_DRW_PMU_FORMAT_ATTR(_name, _config)            \
142         ALI_DRW_PMU_ATTR(_name, ali_drw_pmu_format_show, (void *)_config)
143 #define ALI_DRW_PMU_EVENT_ATTR(_name, _config)             \
144         ALI_DRW_PMU_ATTR(_name, ali_drw_pmu_event_show, (unsigned long)_config)
145
146 static struct attribute *ali_drw_pmu_events_attrs[] = {
147         ALI_DRW_PMU_EVENT_ATTR(hif_rd_or_wr,                    0x0),
148         ALI_DRW_PMU_EVENT_ATTR(hif_wr,                          0x1),
149         ALI_DRW_PMU_EVENT_ATTR(hif_rd,                          0x2),
150         ALI_DRW_PMU_EVENT_ATTR(hif_rmw,                         0x3),
151         ALI_DRW_PMU_EVENT_ATTR(hif_hi_pri_rd,                   0x4),
152         ALI_DRW_PMU_EVENT_ATTR(dfi_wr_data_cycles,              0x7),
153         ALI_DRW_PMU_EVENT_ATTR(dfi_rd_data_cycles,              0x8),
154         ALI_DRW_PMU_EVENT_ATTR(hpr_xact_when_critical,          0x9),
155         ALI_DRW_PMU_EVENT_ATTR(lpr_xact_when_critical,          0xA),
156         ALI_DRW_PMU_EVENT_ATTR(wr_xact_when_critical,           0xB),
157         ALI_DRW_PMU_EVENT_ATTR(op_is_activate,                  0xC),
158         ALI_DRW_PMU_EVENT_ATTR(op_is_rd_or_wr,                  0xD),
159         ALI_DRW_PMU_EVENT_ATTR(op_is_rd_activate,               0xE),
160         ALI_DRW_PMU_EVENT_ATTR(op_is_rd,                        0xF),
161         ALI_DRW_PMU_EVENT_ATTR(op_is_wr,                        0x10),
162         ALI_DRW_PMU_EVENT_ATTR(op_is_mwr,                       0x11),
163         ALI_DRW_PMU_EVENT_ATTR(op_is_precharge,                 0x12),
164         ALI_DRW_PMU_EVENT_ATTR(precharge_for_rdwr,              0x13),
165         ALI_DRW_PMU_EVENT_ATTR(precharge_for_other,             0x14),
166         ALI_DRW_PMU_EVENT_ATTR(rdwr_transitions,                0x15),
167         ALI_DRW_PMU_EVENT_ATTR(write_combine,                   0x16),
168         ALI_DRW_PMU_EVENT_ATTR(war_hazard,                      0x17),
169         ALI_DRW_PMU_EVENT_ATTR(raw_hazard,                      0x18),
170         ALI_DRW_PMU_EVENT_ATTR(waw_hazard,                      0x19),
171         ALI_DRW_PMU_EVENT_ATTR(op_is_enter_selfref_rk0,         0x1A),
172         ALI_DRW_PMU_EVENT_ATTR(op_is_enter_selfref_rk1,         0x1B),
173         ALI_DRW_PMU_EVENT_ATTR(op_is_enter_selfref_rk2,         0x1C),
174         ALI_DRW_PMU_EVENT_ATTR(op_is_enter_selfref_rk3,         0x1D),
175         ALI_DRW_PMU_EVENT_ATTR(op_is_enter_powerdown_rk0,       0x1E),
176         ALI_DRW_PMU_EVENT_ATTR(op_is_enter_powerdown_rk1,       0x1F),
177         ALI_DRW_PMU_EVENT_ATTR(op_is_enter_powerdown_rk2,       0x20),
178         ALI_DRW_PMU_EVENT_ATTR(op_is_enter_powerdown_rk3,       0x21),
179         ALI_DRW_PMU_EVENT_ATTR(selfref_mode_rk0,                0x26),
180         ALI_DRW_PMU_EVENT_ATTR(selfref_mode_rk1,                0x27),
181         ALI_DRW_PMU_EVENT_ATTR(selfref_mode_rk2,                0x28),
182         ALI_DRW_PMU_EVENT_ATTR(selfref_mode_rk3,                0x29),
183         ALI_DRW_PMU_EVENT_ATTR(op_is_refresh,                   0x2A),
184         ALI_DRW_PMU_EVENT_ATTR(op_is_crit_ref,                  0x2B),
185         ALI_DRW_PMU_EVENT_ATTR(op_is_load_mode,                 0x2D),
186         ALI_DRW_PMU_EVENT_ATTR(op_is_zqcl,                      0x2E),
187         ALI_DRW_PMU_EVENT_ATTR(visible_window_limit_reached_rd, 0x30),
188         ALI_DRW_PMU_EVENT_ATTR(visible_window_limit_reached_wr, 0x31),
189         ALI_DRW_PMU_EVENT_ATTR(op_is_dqsosc_mpc,                0x34),
190         ALI_DRW_PMU_EVENT_ATTR(op_is_dqsosc_mrr,                0x35),
191         ALI_DRW_PMU_EVENT_ATTR(op_is_tcr_mrr,                   0x36),
192         ALI_DRW_PMU_EVENT_ATTR(op_is_zqstart,                   0x37),
193         ALI_DRW_PMU_EVENT_ATTR(op_is_zqlatch,                   0x38),
194         ALI_DRW_PMU_EVENT_ATTR(chi_txreq,                       0x39),
195         ALI_DRW_PMU_EVENT_ATTR(chi_txdat,                       0x3A),
196         ALI_DRW_PMU_EVENT_ATTR(chi_rxdat,                       0x3B),
197         ALI_DRW_PMU_EVENT_ATTR(chi_rxrsp,                       0x3C),
198         ALI_DRW_PMU_EVENT_ATTR(tsz_vio,                         0x3D),
199         ALI_DRW_PMU_EVENT_ATTR(cycle,                           0x80),
200         NULL,
201 };
202
203 static struct attribute_group ali_drw_pmu_events_attr_group = {
204         .name = "events",
205         .attrs = ali_drw_pmu_events_attrs,
206 };
207
208 static struct attribute *ali_drw_pmu_format_attr[] = {
209         ALI_DRW_PMU_FORMAT_ATTR(event, "config:0-7"),
210         NULL,
211 };
212
213 static const struct attribute_group ali_drw_pmu_format_group = {
214         .name = "format",
215         .attrs = ali_drw_pmu_format_attr,
216 };
217
218 static ssize_t ali_drw_pmu_cpumask_show(struct device *dev,
219                                         struct device_attribute *attr,
220                                         char *buf)
221 {
222         struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(dev_get_drvdata(dev));
223
224         return cpumap_print_to_pagebuf(true, buf, cpumask_of(drw_pmu->cpu));
225 }
226
227 static struct device_attribute ali_drw_pmu_cpumask_attr =
228                 __ATTR(cpumask, 0444, ali_drw_pmu_cpumask_show, NULL);
229
230 static struct attribute *ali_drw_pmu_cpumask_attrs[] = {
231         &ali_drw_pmu_cpumask_attr.attr,
232         NULL,
233 };
234
235 static const struct attribute_group ali_drw_pmu_cpumask_attr_group = {
236         .attrs = ali_drw_pmu_cpumask_attrs,
237 };
238
239 static umode_t ali_drw_pmu_identifier_attr_visible(struct kobject *kobj,
240                                                 struct attribute *attr, int n)
241 {
242         return attr->mode;
243 }
244
245 static DEVICE_STRING_ATTR_RO(ali_drw_pmu_identifier, 0444, "ali_drw_pmu");
246
247 static struct attribute *ali_drw_pmu_identifier_attrs[] = {
248         &dev_attr_ali_drw_pmu_identifier.attr.attr,
249         NULL
250 };
251
252 static const struct attribute_group ali_drw_pmu_identifier_attr_group = {
253         .attrs = ali_drw_pmu_identifier_attrs,
254         .is_visible = ali_drw_pmu_identifier_attr_visible
255 };
256
257 static const struct attribute_group *ali_drw_pmu_attr_groups[] = {
258         &ali_drw_pmu_events_attr_group,
259         &ali_drw_pmu_cpumask_attr_group,
260         &ali_drw_pmu_format_group,
261         &ali_drw_pmu_identifier_attr_group,
262         NULL,
263 };
264
265 /* find a free counter for the event; the add callback will set hw.idx to it */
266 static int ali_drw_get_counter_idx(struct perf_event *event)
267 {
268         struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
269         int idx;
270
271         for (idx = 0; idx < ALI_DRW_PMU_COMMON_MAX_COUNTERS; ++idx) {
272                 if (!test_and_set_bit(idx, drw_pmu->used_mask))
273                         return idx;
274         }
275
276         /* The counters are all in use. */
277         return -EBUSY;
278 }
279
280 static u64 ali_drw_pmu_read_counter(struct perf_event *event)
281 {
282         struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
283         u64 cycle_high, cycle_low;
284
285         if (GET_DRW_EVENTID(event) == ALI_DRW_PMU_CYCLE_EVT_ID) {
286                 cycle_high = readl(drw_pmu->cfg_base + ALI_DRW_PMU_CYCLE_CNT_HIGH);
287                 cycle_high &= ALI_DRW_PMU_CYCLE_CNT_HIGH_MASK;
288                 cycle_low = readl(drw_pmu->cfg_base + ALI_DRW_PMU_CYCLE_CNT_LOW);
289                 cycle_low &= ALI_DRW_PMU_CYCLE_CNT_LOW_MASK;
290                 return (cycle_high << 32 | cycle_low);
291         }
292
293         return readl(drw_pmu->cfg_base +
294                      ALI_DRW_PMU_COMMON_COUNTERn(event->hw.idx));
295 }
296
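/*
 * Fold the current hardware count into event->count.  prev_count is advanced
 * with a local64_cmpxchg() loop so concurrent updates cannot double-count,
 * and the delta of a common counter is masked to its 32-bit width so a single
 * wrap between two reads is still accounted correctly.
 */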
297 static void ali_drw_pmu_event_update(struct perf_event *event)
298 {
299         struct hw_perf_event *hwc = &event->hw;
300         u64 delta, prev, now;
301
302         do {
303                 prev = local64_read(&hwc->prev_count);
304                 now = ali_drw_pmu_read_counter(event);
305         } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
306
307         /* handle overflow. */
308         delta = now - prev;
309         if (GET_DRW_EVENTID(event) == ALI_DRW_PMU_CYCLE_EVT_ID)
310                 delta &= ALI_DRW_PMU_OV_INTR_MASK;
311         else
312                 delta &= ALI_DRW_CNT_MAX_PERIOD;
313         local64_add(delta, &event->count);
314 }
315
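/*
 * Re-arm a common counter: the target counter is selected by writing
 * (ALI_DRW_PMU_TEST_SEL_COMMON_COUNTER_BASE + idx) to the test control
 * register, its 32-bit initial value is loaded through CNT_PRELOAD, and the
 * test control register is then cleared again.
 */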
316 static void ali_drw_pmu_event_set_period(struct perf_event *event)
317 {
318         u64 pre_val;
319         struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
320
321         /* set a preload counter for test purposes */
322         writel(ALI_DRW_PMU_TEST_SEL_COMMON_COUNTER_BASE + event->hw.idx,
323                drw_pmu->cfg_base + ALI_DRW_PMU_TEST_CTRL);
324
325         /* set counter initial value */
326         pre_val = ALI_DRW_PMU_CNT_INIT;
327         writel(pre_val, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_PRELOAD);
328         local64_set(&event->hw.prev_count, pre_val);
329
330         /* set sel mode to zero to start test */
331         writel(0x0, drw_pmu->cfg_base + ALI_DRW_PMU_TEST_CTRL);
332 }
333
334 static void ali_drw_pmu_enable_counter(struct perf_event *event)
335 {
336         u32 val, subval, reg, shift;
337         int counter = event->hw.idx;
338         struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
339
340         reg = ALI_DRW_PMU_EVENT_SELn(counter);
341         val = readl(drw_pmu->cfg_base + reg);
342         subval = FIELD_PREP(ALI_DRW_PMCOM_CNT_EN, 1) |
343                  FIELD_PREP(ALI_DRW_PMCOM_CNT_EVENT_MASK, drw_pmu->evtids[counter]);
344
345         shift = ALI_DRW_PMCOM_CNT_EVENT_OFFSET(counter);
346         val &= ~(GENMASK(7, 0) << shift);
347         val |= subval << shift;
348
349         writel(val, drw_pmu->cfg_base + reg);
350 }
351
352 static void ali_drw_pmu_disable_counter(struct perf_event *event)
353 {
354         u32 val, reg, subval, shift;
355         struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
356         int counter = event->hw.idx;
357
358         reg = ALI_DRW_PMU_EVENT_SELn(counter);
359         val = readl(drw_pmu->cfg_base + reg);
360         subval = FIELD_PREP(ALI_DRW_PMCOM_CNT_EN, 0) |
361                  FIELD_PREP(ALI_DRW_PMCOM_CNT_EVENT_MASK, 0);
362
363         shift = ALI_DRW_PMCOM_CNT_EVENT_OFFSET(counter);
364         val &= ~(GENMASK(7, 0) << shift);
365         val |= subval << shift;
366
367         writel(val, drw_pmu->cfg_base + reg);
368 }
369
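/*
 * Overflow interrupt handler: counting is paused for every active event,
 * each common counter flagged in the overflow status register is folded into
 * its perf event and re-armed, the status bits are cleared, and counting is
 * resumed for all events that are not stopped.
 */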
370 static irqreturn_t ali_drw_pmu_isr(int irq_num, void *data)
371 {
372         struct ali_drw_pmu_irq *irq = data;
373         struct ali_drw_pmu *drw_pmu;
374         irqreturn_t ret = IRQ_NONE;
375
376         rcu_read_lock();
377         list_for_each_entry_rcu(drw_pmu, &irq->pmus_node, pmus_node) {
378                 unsigned long status, clr_status;
379                 struct perf_event *event;
380                 unsigned int idx;
381
382                 for (idx = 0; idx < ALI_DRW_PMU_COMMON_MAX_COUNTERS; idx++) {
383                         event = drw_pmu->events[idx];
384                         if (!event)
385                                 continue;
386                         ali_drw_pmu_disable_counter(event);
387                 }
388
389                 /* common counter intr status */
390                 status = readl(drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_STATUS);
391                 status = FIELD_GET(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, status);
392                 if (status) {
393                         for_each_set_bit(idx, &status,
394                                          ALI_DRW_PMU_COMMON_MAX_COUNTERS) {
395                                 event = drw_pmu->events[idx];
396                                 if (WARN_ON_ONCE(!event))
397                                         continue;
398                                 ali_drw_pmu_event_update(event);
399                                 ali_drw_pmu_event_set_period(event);
400                         }
401
402                         /* clear common counter intr status */
403                         clr_status = FIELD_PREP(ALI_DRW_PMCOM_CNT_OV_INTR_MASK, status);
404                         writel(clr_status,
405                                drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_CLR);
406                 }
407
408                 for (idx = 0; idx < ALI_DRW_PMU_COMMON_MAX_COUNTERS; idx++) {
409                         event = drw_pmu->events[idx];
410                         if (!event)
411                                 continue;
412                         if (!(event->hw.state & PERF_HES_STOPPED))
413                                 ali_drw_pmu_enable_counter(event);
414                 }
415                 if (status)
416                         ret = IRQ_HANDLED;
417         }
418         rcu_read_unlock();
419         return ret;
420 }
421
422 static struct ali_drw_pmu_irq *__ali_drw_pmu_init_irq(struct platform_device
423                                                       *pdev, int irq_num)
424 {
425         int ret;
426         struct ali_drw_pmu_irq *irq;
427
428         list_for_each_entry(irq, &ali_drw_pmu_irqs, irqs_node) {
429                 if (irq->irq_num == irq_num
430                     && refcount_inc_not_zero(&irq->refcount))
431                         return irq;
432         }
433
434         irq = kzalloc(sizeof(*irq), GFP_KERNEL);
435         if (!irq)
436                 return ERR_PTR(-ENOMEM);
437
438         INIT_LIST_HEAD(&irq->pmus_node);
439
440         /* Pick one CPU to be the preferred one to use */
441         irq->cpu = smp_processor_id();
442         refcount_set(&irq->refcount, 1);
443
444         /*
445          * FIXME: one of the DDRSS Driveway PMU overflow interrupts shares its
446          * irq number with the MPAM ERR_IRQ. To register both the DDRSS PMU and
447          * the MPAM driver successfully, add the IRQF_SHARED flag. However, a
448          * PMU interrupt should not be shared with any other component.
449          */
450         ret = devm_request_irq(&pdev->dev, irq_num, ali_drw_pmu_isr,
451                                IRQF_SHARED, dev_name(&pdev->dev), irq);
452         if (ret < 0) {
453                 dev_err(&pdev->dev,
454                         "Failed to request IRQ:%d ret:%d\n", irq_num, ret);
455                 goto out_free;
456         }
457
458         ret = irq_set_affinity_hint(irq_num, cpumask_of(irq->cpu));
459         if (ret)
460                 goto out_free;
461
462         ret = cpuhp_state_add_instance_nocalls(ali_drw_cpuhp_state_num,
463                                              &irq->node);
464         if (ret)
465                 goto out_free;
466
467         irq->irq_num = irq_num;
468         list_add(&irq->irqs_node, &ali_drw_pmu_irqs);
469
470         return irq;
471
472 out_free:
473         kfree(irq);
474         return ERR_PTR(ret);
475 }
476
477 static int ali_drw_pmu_init_irq(struct ali_drw_pmu *drw_pmu,
478                                 struct platform_device *pdev)
479 {
480         int irq_num;
481         struct ali_drw_pmu_irq *irq;
482
483         /* Read and init IRQ */
484         irq_num = platform_get_irq(pdev, 0);
485         if (irq_num < 0)
486                 return irq_num;
487
488         mutex_lock(&ali_drw_pmu_irqs_lock);
489         irq = __ali_drw_pmu_init_irq(pdev, irq_num);
490         mutex_unlock(&ali_drw_pmu_irqs_lock);
491
492         if (IS_ERR(irq))
493                 return PTR_ERR(irq);
494
495         drw_pmu->irq = irq;
496
497         mutex_lock(&ali_drw_pmu_irqs_lock);
498         list_add_rcu(&drw_pmu->pmus_node, &irq->pmus_node);
499         mutex_unlock(&ali_drw_pmu_irqs_lock);
500
501         return 0;
502 }
503
504 static void ali_drw_pmu_uninit_irq(struct ali_drw_pmu *drw_pmu)
505 {
506         struct ali_drw_pmu_irq *irq = drw_pmu->irq;
507
508         mutex_lock(&ali_drw_pmu_irqs_lock);
509         list_del_rcu(&drw_pmu->pmus_node);
510
511         if (!refcount_dec_and_test(&irq->refcount)) {
512                 mutex_unlock(&ali_drw_pmu_irqs_lock);
513                 return;
514         }
515
516         list_del(&irq->irqs_node);
517         mutex_unlock(&ali_drw_pmu_irqs_lock);
518
519         WARN_ON(irq_set_affinity_hint(irq->irq_num, NULL));
520         cpuhp_state_remove_instance_nocalls(ali_drw_cpuhp_state_num,
521                                             &irq->node);
522         kfree(irq);
523 }
524
525 static int ali_drw_pmu_event_init(struct perf_event *event)
526 {
527         struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
528         struct hw_perf_event *hwc = &event->hw;
529         struct perf_event *sibling;
530         struct device *dev = drw_pmu->pmu.dev;
531
532         if (event->attr.type != event->pmu->type)
533                 return -ENOENT;
534
535         if (is_sampling_event(event)) {
536                 dev_err(dev, "Sampling not supported!\n");
537                 return -EOPNOTSUPP;
538         }
539
540         if (event->attach_state & PERF_ATTACH_TASK) {
541                 dev_err(dev, "Per-task counters cannot be allocated!\n");
542                 return -EOPNOTSUPP;
543         }
544
545         event->cpu = drw_pmu->cpu;
546         if (event->cpu < 0) {
547                 dev_err(dev, "Per-task mode not supported!\n");
548                 return -EOPNOTSUPP;
549         }
550
551         if (event->group_leader != event &&
552             !is_software_event(event->group_leader)) {
553                 dev_err(dev, "driveway only allows one event!\n");
554                 return -EINVAL;
555         }
556
557         for_each_sibling_event(sibling, event->group_leader) {
558                 if (sibling != event && !is_software_event(sibling)) {
559                         dev_err(dev, "driveway event not allowed!\n");
560                         return -EINVAL;
561                 }
562         }
563
564         /* reset all the pmu counters */
565         writel(ALI_DRW_PMU_CNT_RST, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
566
567         hwc->idx = -1;
568
569         return 0;
570 }
571
572 static void ali_drw_pmu_start(struct perf_event *event, int flags)
573 {
574         struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
575
576         event->hw.state = 0;
577
578         if (GET_DRW_EVENTID(event) == ALI_DRW_PMU_CYCLE_EVT_ID) {
579                 writel(ALI_DRW_PMU_CNT_START,
580                        drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
581                 return;
582         }
583
584         ali_drw_pmu_event_set_period(event);
585         if (flags & PERF_EF_RELOAD) {
586                 unsigned long prev_raw_count =
587                     local64_read(&event->hw.prev_count);
588                 writel(prev_raw_count,
589                        drw_pmu->cfg_base + ALI_DRW_PMU_CNT_PRELOAD);
590         }
591
592         ali_drw_pmu_enable_counter(event);
593
594         writel(ALI_DRW_PMU_CNT_START, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
595 }
596
597 static void ali_drw_pmu_stop(struct perf_event *event, int flags)
598 {
599         struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
600
601         if (event->hw.state & PERF_HES_STOPPED)
602                 return;
603
604         if (GET_DRW_EVENTID(event) != ALI_DRW_PMU_CYCLE_EVT_ID)
605                 ali_drw_pmu_disable_counter(event);
606
607         writel(ALI_DRW_PMU_CNT_STOP, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
608
609         ali_drw_pmu_event_update(event);
610         event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
611 }
612
613 static int ali_drw_pmu_add(struct perf_event *event, int flags)
614 {
615         struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
616         struct hw_perf_event *hwc = &event->hw;
617         int idx = -1;
618         int evtid;
619
620         evtid = GET_DRW_EVENTID(event);
621
622         if (evtid != ALI_DRW_PMU_CYCLE_EVT_ID) {
623                 idx = ali_drw_get_counter_idx(event);
624                 if (idx < 0)
625                         return idx;
626                 drw_pmu->events[idx] = event;
627                 drw_pmu->evtids[idx] = evtid;
628         }
629         hwc->idx = idx;
630
631         hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
632
633         if (flags & PERF_EF_START)
634                 ali_drw_pmu_start(event, PERF_EF_RELOAD);
635
636         /* Propagate our changes to the userspace mapping. */
637         perf_event_update_userpage(event);
638
639         return 0;
640 }
641
642 static void ali_drw_pmu_del(struct perf_event *event, int flags)
643 {
644         struct ali_drw_pmu *drw_pmu = to_ali_drw_pmu(event->pmu);
645         struct hw_perf_event *hwc = &event->hw;
646         int idx = hwc->idx;
647
648         ali_drw_pmu_stop(event, PERF_EF_UPDATE);
649
650         if (idx >= 0 && idx < ALI_DRW_PMU_COMMON_MAX_COUNTERS) {
651                 drw_pmu->events[idx] = NULL;
652                 drw_pmu->evtids[idx] = 0;
653                 clear_bit(idx, drw_pmu->used_mask);
654         }
655
656         perf_event_update_userpage(event);
657 }
658
659 static void ali_drw_pmu_read(struct perf_event *event)
660 {
661         ali_drw_pmu_event_update(event);
662 }
663
664 static int ali_drw_pmu_probe(struct platform_device *pdev)
665 {
666         struct ali_drw_pmu *drw_pmu;
667         struct resource *res;
668         char *name;
669         int ret;
670
671         drw_pmu = devm_kzalloc(&pdev->dev, sizeof(*drw_pmu), GFP_KERNEL);
672         if (!drw_pmu)
673                 return -ENOMEM;
674
675         drw_pmu->dev = &pdev->dev;
676         platform_set_drvdata(pdev, drw_pmu);
677
678         drw_pmu->cfg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
679         if (IS_ERR(drw_pmu->cfg_base))
680                 return PTR_ERR(drw_pmu->cfg_base);
681
682         name = devm_kasprintf(drw_pmu->dev, GFP_KERNEL, "ali_drw_%llx",
683                               (u64) (res->start >> ALI_DRW_PMU_PA_SHIFT));
684         if (!name)
685                 return -ENOMEM;
686
687         writel(ALI_DRW_PMU_CNT_RST, drw_pmu->cfg_base + ALI_DRW_PMU_CNT_CTRL);
688
689         /* enable the generation of interrupts by all common counters */
690         writel(ALI_DRW_PMCOM_CNT_OV_INTR_MASK,
691                drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_ENABLE_CTL);
692
693         /* clear interrupt status */
694         writel(0xffffff, drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_CLR);
695
696         drw_pmu->cpu = smp_processor_id();
697
698         ret = ali_drw_pmu_init_irq(drw_pmu, pdev);
699         if (ret)
700                 return ret;
701
702         drw_pmu->pmu = (struct pmu) {
703                 .module         = THIS_MODULE,
704                 .parent         = &pdev->dev,
705                 .task_ctx_nr    = perf_invalid_context,
706                 .event_init     = ali_drw_pmu_event_init,
707                 .add            = ali_drw_pmu_add,
708                 .del            = ali_drw_pmu_del,
709                 .start          = ali_drw_pmu_start,
710                 .stop           = ali_drw_pmu_stop,
711                 .read           = ali_drw_pmu_read,
712                 .attr_groups    = ali_drw_pmu_attr_groups,
713                 .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
714         };
715
716         ret = perf_pmu_register(&drw_pmu->pmu, name, -1);
717         if (ret) {
718                 dev_err(drw_pmu->dev, "DRW Driveway PMU register failed!\n");
719                 ali_drw_pmu_uninit_irq(drw_pmu);
720         }
721
722         return ret;
723 }
724
725 static void ali_drw_pmu_remove(struct platform_device *pdev)
726 {
727         struct ali_drw_pmu *drw_pmu = platform_get_drvdata(pdev);
728
729         /* disable the generation of interrupts by all common counters */
730         writel(ALI_DRW_PMCOM_CNT_OV_INTR_MASK,
731                drw_pmu->cfg_base + ALI_DRW_PMU_OV_INTR_DISABLE_CTL);
732
733         ali_drw_pmu_uninit_irq(drw_pmu);
734         perf_pmu_unregister(&drw_pmu->pmu);
735 }
736
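/*
 * CPU hotplug callback: when the CPU servicing this interrupt goes offline,
 * migrate the perf context of every PMU sharing the interrupt to another
 * online CPU (preferring the same NUMA node) and move the IRQ affinity hint
 * along with it.
 */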
737 static int ali_drw_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
738 {
739         struct ali_drw_pmu_irq *irq;
740         struct ali_drw_pmu *drw_pmu;
741         unsigned int target;
742
743         irq = hlist_entry_safe(node, struct ali_drw_pmu_irq, node);
744         if (cpu != irq->cpu)
745                 return 0;
746
747         target = cpumask_any_and_but(cpumask_of_node(cpu_to_node(cpu)),
748                                      cpu_online_mask, cpu);
749         if (target >= nr_cpu_ids)
750                 target = cpumask_any_but(cpu_online_mask, cpu);
751
752         if (target >= nr_cpu_ids)
753                 return 0;
754
755         /* We're only reading, but this isn't the place to be involving RCU */
756         mutex_lock(&ali_drw_pmu_irqs_lock);
757         list_for_each_entry(drw_pmu, &irq->pmus_node, pmus_node)
758                 perf_pmu_migrate_context(&drw_pmu->pmu, irq->cpu, target);
759         mutex_unlock(&ali_drw_pmu_irqs_lock);
760
761         WARN_ON(irq_set_affinity_hint(irq->irq_num, cpumask_of(target)));
762         irq->cpu = target;
763
764         return 0;
765 }
766
767 /*
768  * For historical reasons, the HID used in the production environment is
769  * ARMHD700, so we keep ARMHD700 as the Compatible ID.
770  */
771 static const struct acpi_device_id ali_drw_acpi_match[] = {
772         {"BABA5000", 0},
773         {"ARMHD700", 0},
774         {}
775 };
776
777 MODULE_DEVICE_TABLE(acpi, ali_drw_acpi_match);
778
779 static struct platform_driver ali_drw_pmu_driver = {
780         .driver = {
781                    .name = "ali_drw_pmu",
782                    .acpi_match_table = ali_drw_acpi_match,
783                    },
784         .probe = ali_drw_pmu_probe,
785         .remove_new = ali_drw_pmu_remove,
786 };
787
788 static int __init ali_drw_pmu_init(void)
789 {
790         int ret;
791
792         ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
793                                       "ali_drw_pmu:online",
794                                       NULL, ali_drw_pmu_offline_cpu);
795
796         if (ret < 0) {
797                 pr_err("DRW Driveway PMU: setup hotplug failed, ret = %d\n",
798                        ret);
799                 return ret;
800         }
801         ali_drw_cpuhp_state_num = ret;
802
803         ret = platform_driver_register(&ali_drw_pmu_driver);
804         if (ret)
805                 cpuhp_remove_multi_state(ali_drw_cpuhp_state_num);
806
807         return ret;
808 }
809
810 static void __exit ali_drw_pmu_exit(void)
811 {
812         platform_driver_unregister(&ali_drw_pmu_driver);
813         cpuhp_remove_multi_state(ali_drw_cpuhp_state_num);
814 }
815
816 module_init(ali_drw_pmu_init);
817 module_exit(ali_drw_pmu_exit);
818
819 MODULE_AUTHOR("Hongbo Yao <[email protected]>");
820 MODULE_AUTHOR("Neng Chen <[email protected]>");
821 MODULE_AUTHOR("Shuai Xue <[email protected]>");
822 MODULE_DESCRIPTION("Alibaba DDR Sub-System Driveway PMU driver");
823 MODULE_LICENSE("GPL v2");