/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>

#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_MAX_FIXED	1
#define UNCORE_PMC_IDX_MAX_FREERUNNING	1
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING	(UNCORE_PMC_IDX_FIXED + \
					UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FREERUNNING + \
					UNCORE_PMC_IDX_MAX_FREERUNNING)

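/*
 * Illustrative note (not in the original header): with the values above,
 * the generic counters occupy indices 0..7, the fixed counter sits at
 * UNCORE_PMC_IDX_FIXED = 8, the free running pseudo index is
 * UNCORE_PMC_IDX_FREERUNNING = 9, and UNCORE_PMC_IDX_MAX = 10.
 */
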
#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	4

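/*
 * Illustrative example (not in the original header): the packing macros
 * above place the PCI device number in bits 31-24, the function in bits
 * 23-16, the uncore type in bits 15-8 and the box index in bits 7-0, e.g.
 *
 *	UNCORE_PCI_DEV_FULL_DATA(0x1e, 3, 2, 0) == 0x1e030200
 *	UNCORE_PCI_DEV_DEV(0x1e030200)		== 0x1e
 *	UNCORE_PCI_DEV_FUNC(0x1e030200)		== 0x03
 *	UNCORE_PCI_DEV_TYPE(0x1e030200)		== 0x02
 *	UNCORE_PCI_DEV_IDX(0x1e030200)		== 0x00
 */
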
#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;

struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	int num_freerunning_types;
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	unsigned msr_offset;
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;
	unsigned *msr_offsets;
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	struct freerunning_counters *freerunning;
	const struct attribute_group *attr_groups[4];
	struct pmu *pmu; /* for custom pmu ops */
};

#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

struct intel_uncore_pmu {
	struct pmu pmu;
	char name[UNCORE_PMU_NAME_LEN];
	int pmu_idx;
	int func_id;
	bool registered;
	atomic_t activeboxes;
	struct intel_uncore_type *type;
	struct intel_uncore_box **boxes;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

struct intel_uncore_box {
	int pci_phys_id;
	int pkgid;	/* Logical package ID */
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration; /* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void *io_addr;
	struct intel_uncore_extra_reg shared_regs[0];
};

/* CFL uncore 8th cbox MSRs */
#define CFL_UNC_CBO_7_PERFEVTSEL0		0xf70
#define CFL_UNC_CBO_7_PER_CTR0			0xf76

#define UNCORE_BOX_FLAG_INITIATED		0
/* event config registers are 8 bytes apart */
#define UNCORE_BOX_FLAG_CTL_OFFS8		1
/* CFL 8th CBOX has different MSR space */
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS	2

struct uncore_event_desc {
	struct kobj_attribute attr;
	const char *config;
};

struct freerunning_counters {
	unsigned int counter_base;
	unsigned int counter_offset;
	unsigned int box_offset;
	unsigned int num_counters;
	unsigned int bits;
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_physid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);

ssize_t uncore_event_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf);

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config	= _config,					\
}

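/*
 * Illustrative use (hypothetical table name): the uncore driver .c files
 * build event description tables from this macro, terminated by an empty
 * entry, e.g.
 *
 *	static struct uncore_event_desc foo_uncore_events[] = {
 *		INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
 *		{ },
 *	};
 */
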
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct kobject *kobj,		\
				struct kobj_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct kobj_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)

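/*
 * Illustrative use: each invocation expands to a sysfs show() helper plus
 * a kobj_attribute named format_attr_<var> for the PMU's format group, e.g.
 *
 *	DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 *	DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
 *
 * define format_attr_event and format_attr_umask.
 */
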
static inline bool uncore_pmc_fixed(int idx)
{
	return idx == UNCORE_PMC_IDX_FIXED;
}

static inline bool uncore_pmc_freerunning(int idx)
{
	return idx == UNCORE_PMC_IDX_FREERUNNING;
}

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
		return idx * 8 + box->pmu->type->event_ctl;

	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;
	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}

/*
 * In the uncore document, there is no event-code assigned to free running
 * counters. Some events need to be defined to indicate the free running
 * counters. The events are encoded as event-code + umask-code.
 *
 * The event-code for all free running counters is 0xff, which is the same
 * as the fixed counters.
 *
 * The umask-code is used to distinguish a fixed counter from a free running
 * counter, and to distinguish the different types of free running counters.
 * - For fixed counters, the umask-code is 0x0X.
 *   X indicates the index of the fixed counter, which starts from 0.
 * - For free running counters, the umask-code uses the rest of the space.
 *   It bears the format 0xXY.
 *   X stands for the type of free running counter, which starts from 1.
 *   Y stands for the index of free running counters of the same type,
 *   which starts from 0.
 *
 * For example, there are three types of IIO free running counters on
 * Skylake server: IO CLOCKS counters, BANDWIDTH counters and UTILIZATION
 * counters. The event-code for all of these free running counters is 0xff.
 * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type,
 * whose umask-code starts from 0x10.
 * So 'ioclk' is encoded as event=0xff,umask=0x10.
 * 'bw_in_port2' is the third counter of the BANDWIDTH counters. BANDWIDTH
 * is the second type, whose umask-code starts from 0x20.
 * So 'bw_in_port2' is encoded as event=0xff,umask=0x22.
 */
static inline unsigned int uncore_freerunning_idx(u64 config)
{
	return ((config >> 8) & 0xf);
}

#define UNCORE_FREERUNNING_UMASK_START		0x10

static inline unsigned int uncore_freerunning_type(u64 config)
{
	return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
}

static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->attr.config);
	unsigned int idx = uncore_freerunning_idx(event->attr.config);
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->freerunning[type].counter_base +
	       pmu->type->freerunning[type].counter_offset * idx +
	       pmu->type->freerunning[type].box_offset * pmu->pmu_idx;
}

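/*
 * Worked example (illustrative, not in the original header): for the
 * 'bw_in_port2' event above, the umask is 0x22, so
 *
 *	uncore_freerunning_idx(config)  = ((config >> 8) & 0xf)         = 2
 *	uncore_freerunning_type(config) = (((config >> 8) - 0x10) >> 4) = 1
 *
 * and uncore_freerunning_counter() resolves to
 *	freerunning[1].counter_base + 2 * freerunning[1].counter_offset +
 *	pmu_idx * freerunning[1].box_offset.
 */
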
static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PERFEVTSEL0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->event_ctl +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PER_CTR0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->perf_ctr +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
				     struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->attr.config);

	return box->pmu->type->freerunning[type].bits;
}

static inline int uncore_num_freerunning(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->attr.config);

	return box->pmu->type->freerunning[type].num_counters;
}

static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	return box->pmu->type->num_freerunning_types;
}

static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
						 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->attr.config);
	unsigned int idx = uncore_freerunning_idx(event->attr.config);

	return (type < uncore_num_freerunning_types(box, event)) &&
	       (idx < uncore_num_freerunning(box, event));
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline bool is_freerunning_event(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
	       (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
}

static inline void uncore_disable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->disable_box)
		box->pmu->type->ops->disable_box(box);
}

static inline void uncore_enable_box(struct intel_uncore_box *box)
{
	if (box->pmu->type->ops->enable_box)
		box->pmu->type->ops->enable_box(box);
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
					struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				       struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				      struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->pkgid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);

extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct pci_driver *uncore_pci_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;

/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);