// SPDX-License-Identifier: GPL-2.0
/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"
#include "uncore_discovery.h"
/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC		0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC		0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC		0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC		0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC		0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC		0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC		0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC		0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC		0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC		0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC		0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC		0x191f
#define PCI_DEVICE_ID_INTEL_SKL_E3_IMC		0x1918
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC		0x590c
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC		0x5904
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC		0x5914
#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC		0x590f
#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC		0x591f
#define PCI_DEVICE_ID_INTEL_KBL_HQ_IMC		0x5910
#define PCI_DEVICE_ID_INTEL_KBL_WQ_IMC		0x5918
#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC		0x3ecc
#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC		0x3ed0
#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC		0x3e10
#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC		0x3ec4
#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC	0x3e0f
#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC	0x3e1f
#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC	0x3ec2
#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC	0x3e30
#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC	0x3e18
#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC	0x3ec6
#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC	0x3e31
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC	0x3e33
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC	0x3eca
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC	0x3e32
#define PCI_DEVICE_ID_INTEL_AML_YD_IMC		0x590c
#define PCI_DEVICE_ID_INTEL_AML_YQ_IMC		0x590d
#define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC		0x3ed0
#define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC	0x3e34
#define PCI_DEVICE_ID_INTEL_WHL_UD_IMC		0x3e35
#define PCI_DEVICE_ID_INTEL_CML_H1_IMC		0x9b44
#define PCI_DEVICE_ID_INTEL_CML_H2_IMC		0x9b54
#define PCI_DEVICE_ID_INTEL_CML_H3_IMC		0x9b64
#define PCI_DEVICE_ID_INTEL_CML_U1_IMC		0x9b51
#define PCI_DEVICE_ID_INTEL_CML_U2_IMC		0x9b61
#define PCI_DEVICE_ID_INTEL_CML_U3_IMC		0x9b71
#define PCI_DEVICE_ID_INTEL_CML_S1_IMC		0x9b33
#define PCI_DEVICE_ID_INTEL_CML_S2_IMC		0x9b43
#define PCI_DEVICE_ID_INTEL_CML_S3_IMC		0x9b53
#define PCI_DEVICE_ID_INTEL_CML_S4_IMC		0x9b63
#define PCI_DEVICE_ID_INTEL_CML_S5_IMC		0x9b73
#define PCI_DEVICE_ID_INTEL_ICL_U_IMC		0x8a02
#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC		0x8a12
#define PCI_DEVICE_ID_INTEL_TGL_U1_IMC		0x9a02
#define PCI_DEVICE_ID_INTEL_TGL_U2_IMC		0x9a04
#define PCI_DEVICE_ID_INTEL_TGL_U3_IMC		0x9a12
#define PCI_DEVICE_ID_INTEL_TGL_U4_IMC		0x9a14
#define PCI_DEVICE_ID_INTEL_TGL_H_IMC		0x9a36
#define PCI_DEVICE_ID_INTEL_RKL_1_IMC		0x4c43
#define PCI_DEVICE_ID_INTEL_RKL_2_IMC		0x4c53
#define PCI_DEVICE_ID_INTEL_ADL_1_IMC		0x4660
#define PCI_DEVICE_ID_INTEL_ADL_2_IMC		0x4641
#define PCI_DEVICE_ID_INTEL_ADL_3_IMC		0x4601
#define PCI_DEVICE_ID_INTEL_ADL_4_IMC		0x4602
#define PCI_DEVICE_ID_INTEL_ADL_5_IMC		0x4609
#define PCI_DEVICE_ID_INTEL_ADL_6_IMC		0x460a
#define PCI_DEVICE_ID_INTEL_ADL_7_IMC		0x4621
#define PCI_DEVICE_ID_INTEL_ADL_8_IMC		0x4623
#define PCI_DEVICE_ID_INTEL_ADL_9_IMC		0x4629
#define PCI_DEVICE_ID_INTEL_ADL_10_IMC		0x4637
#define PCI_DEVICE_ID_INTEL_ADL_11_IMC		0x463b
#define PCI_DEVICE_ID_INTEL_ADL_12_IMC		0x4648
#define PCI_DEVICE_ID_INTEL_ADL_13_IMC		0x4649
#define PCI_DEVICE_ID_INTEL_ADL_14_IMC		0x4650
#define PCI_DEVICE_ID_INTEL_ADL_15_IMC		0x4668
#define PCI_DEVICE_ID_INTEL_ADL_16_IMC		0x4670
#define PCI_DEVICE_ID_INTEL_ADL_17_IMC		0x4614
#define PCI_DEVICE_ID_INTEL_ADL_18_IMC		0x4617
#define PCI_DEVICE_ID_INTEL_ADL_19_IMC		0x4618
#define PCI_DEVICE_ID_INTEL_ADL_20_IMC		0x461B
#define PCI_DEVICE_ID_INTEL_ADL_21_IMC		0x461C
#define PCI_DEVICE_ID_INTEL_RPL_1_IMC		0xA700
#define PCI_DEVICE_ID_INTEL_RPL_2_IMC		0xA702
#define PCI_DEVICE_ID_INTEL_RPL_3_IMC		0xA706
#define PCI_DEVICE_ID_INTEL_RPL_4_IMC		0xA709
#define PCI_DEVICE_ID_INTEL_RPL_5_IMC		0xA701
#define PCI_DEVICE_ID_INTEL_RPL_6_IMC		0xA703
#define PCI_DEVICE_ID_INTEL_RPL_7_IMC		0xA704
#define PCI_DEVICE_ID_INTEL_RPL_8_IMC		0xA705
#define PCI_DEVICE_ID_INTEL_RPL_9_IMC		0xA706
#define PCI_DEVICE_ID_INTEL_RPL_10_IMC		0xA707
#define PCI_DEVICE_ID_INTEL_RPL_11_IMC		0xA708
#define PCI_DEVICE_ID_INTEL_RPL_12_IMC		0xA709
#define PCI_DEVICE_ID_INTEL_RPL_13_IMC		0xA70a
#define PCI_DEVICE_ID_INTEL_RPL_14_IMC		0xA70b
#define PCI_DEVICE_ID_INTEL_RPL_15_IMC		0xA715
#define PCI_DEVICE_ID_INTEL_RPL_16_IMC		0xA716
#define PCI_DEVICE_ID_INTEL_RPL_17_IMC		0xA717
#define PCI_DEVICE_ID_INTEL_RPL_18_IMC		0xA718
#define PCI_DEVICE_ID_INTEL_RPL_19_IMC		0xA719
#define PCI_DEVICE_ID_INTEL_RPL_20_IMC		0xA71A
#define PCI_DEVICE_ID_INTEL_RPL_21_IMC		0xA71B
#define PCI_DEVICE_ID_INTEL_RPL_22_IMC		0xA71C
#define PCI_DEVICE_ID_INTEL_RPL_23_IMC		0xA728
#define PCI_DEVICE_ID_INTEL_RPL_24_IMC		0xA729
#define PCI_DEVICE_ID_INTEL_RPL_25_IMC		0xA72A
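
/*
 * Note: these are the PCI device IDs of the client host bridge / memory
 * controller device (00:00.0). imc_uncore_find_dev() further below probes
 * this list at init time to pick the pci_driver matching the part we are
 * actually running on, which is why several entries may legitimately share
 * an ID (e.g. AML_YD and KBL_Y).
 */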
#define IMC_UNCORE_DEV(a)						\
{									\
	PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_##a##_IMC),	\
	.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),	\
}
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)
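
/*
 * Layout recap, derived from the masks above: bits 0-7 select the event,
 * bits 8-15 the unit mask, bit 18 enables edge detect, bit 22 enables the
 * counter, bit 23 inverts the counter-mask comparison, and the counter
 * mask occupies bits 24-28 (SNB) or 24-31 (NHM). For instance, a
 * hypothetical raw config of event=0x80, umask=0x01 with edge detect
 * would be encoded as 0x80 | (0x01 << 8) | SNB_UNC_CTL_EDGE_DET.
 */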
/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL                 0x391
#define SNB_UNC_FIXED_CTR_CTRL                  0x394
#define SNB_UNC_FIXED_CTR                       0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN                   (1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0               0x700
#define SNB_UNC_CBO_0_PER_CTR0                  0x706
#define SNB_UNC_CBO_MSR_OFFSET                  0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0			0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0			0x3b2
#define SNB_UNC_ARB_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL                 0x391
#define NHM_UNC_FIXED_CTR                       0x394
#define NHM_UNC_FIXED_CTR_CTRL                  0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0                     0x3c0
#define NHM_UNC_UNCORE_PMC0                     0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL			0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL		((1 << 5) - 1)

/* ICL Cbo register */
#define ICL_UNC_CBO_CONFIG			0x396
#define ICL_UNC_NUM_CBO_MASK			0xf
#define ICL_UNC_CBO_0_PER_CTR0			0x702
#define ICL_UNC_CBO_MSR_OFFSET			0x8

/* ICL ARB register */
#define ICL_UNC_ARB_PER_CTR			0x3b1
#define ICL_UNC_ARB_PERFEVTSEL			0x3b3

/* ADL uncore global control */
#define ADL_UNC_PERF_GLOBAL_CTL			0x2ff0
#define ADL_UNC_FIXED_CTR_CTRL			0x2fde
#define ADL_UNC_FIXED_CTR			0x2fdf

/* ADL Cbo register */
#define ADL_UNC_CBO_0_PER_CTR0			0x2002
#define ADL_UNC_CBO_0_PERFEVTSEL0		0x2000
#define ADL_UNC_CTL_THRESHOLD			0x3f000000
#define ADL_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 ADL_UNC_CTL_THRESHOLD)

/* ADL ARB register */
#define ADL_UNC_ARB_PER_CTR0			0x2FD2
#define ADL_UNC_ARB_PERFEVTSEL0			0x2FD0
#define ADL_UNC_ARB_MSR_OFFSET			0x8
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(chmask, chmask, "config:8-11");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(threshold, threshold, "config:24-29");
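
/*
 * These format attributes become the files under
 * /sys/bus/event_source/devices/uncore_*/format/, which is what lets
 * tools translate a string such as (illustrative)
 *	perf stat -a -e 'uncore_cbox_0/event=0x80,umask=0x01/' sleep 1
 * into the config bit ranges declared above.
 */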
/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};
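
/*
 * The clockticks event above is scheduled on the fixed counter: 0xff is
 * the fixed-counter pseudo event (UNCORE_FIXED_EVENT in uncore.h), not a
 * programmable event select, which is why snb_uncore_cbox below also
 * declares .fixed_ctr/.fixed_ctl.
 */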
static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static const struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.enable_box	= snb_uncore_msr_enable_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};
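
/*
 * The two constraints above restrict ARB events 0x80 and 0x83 to
 * counter 0 (counter mask 0x1); the remaining ARB events are
 * unconstrained and may be scheduled on either counter.
 */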
static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}
static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}

	/* The 8th CBOX has different MSR space */
	if (box->pmu->pmu_idx == 7)
		__set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}
static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.enable_box	= skl_uncore_msr_enable_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}
static struct intel_uncore_ops icl_uncore_msr_ops = {
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type icl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct uncore_event_desc icl_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
	{ /* end: all zeroes */ },
};

static struct attribute *icl_uncore_clock_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group icl_uncore_clock_format_group = {
	.name		= "format",
	.attrs		= icl_uncore_clock_formats_attr,
};

static struct intel_uncore_type icl_uncore_clockbox = {
	.name		= "clock",
	.num_counters	= 1,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &icl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

static struct intel_uncore_type icl_uncore_arb = {
	.name		= "arb",
	.num_counters   = 1,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_ARB_PER_CTR,
	.event_ctl	= ICL_UNC_ARB_PERFEVTSEL,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *icl_msr_uncores[] = {
	&icl_uncore_cbox,
	&icl_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};

static int icl_get_cbox_num(void)
{
	u64 num_boxes;

	rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);

	return num_boxes & ICL_UNC_NUM_CBO_MASK;
}
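
/*
 * Unlike SNB/SKL, where the number of C-boxes is a fixed constant later
 * clipped to the core count, ICL and its derivatives read it out of the
 * ICL_UNC_CBO_CONFIG MSR (low 4 bits) at boot, which is why
 * icl_uncore_cbox deliberately leaves .num_boxes unset until
 * icl_uncore_cpu_init() below fills it in.
 */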
void icl_uncore_cpu_init(void)
{
	uncore_msr_uncores = icl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
}

static struct intel_uncore_type *tgl_msr_uncores[] = {
	&icl_uncore_cbox,
	&snb_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};

static void rkl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

void tgl_uncore_cpu_init(void)
{
	uncore_msr_uncores = tgl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
	icl_uncore_cbox.ops = &skl_uncore_msr_ops;
	icl_uncore_clockbox.ops = &skl_uncore_msr_ops;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
	skl_uncore_msr_ops.init_box = rkl_uncore_msr_init_box;
}
static void adl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

static void adl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

static void adl_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
}

static void adl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops adl_uncore_msr_ops = {
	.init_box	= adl_uncore_msr_init_box,
	.enable_box	= adl_uncore_msr_enable_box,
	.disable_box	= adl_uncore_msr_disable_box,
	.exit_box	= adl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};
static struct attribute *adl_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_threshold.attr,
	NULL,
};

static const struct attribute_group adl_uncore_format_group = {
	.name		= "format",
	.attrs		= adl_uncore_formats_attr,
};
static struct intel_uncore_type adl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ADL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= ADL_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
	.ops		= &adl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

static struct intel_uncore_type adl_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ADL_UNC_ARB_PER_CTR0,
	.event_ctl	= ADL_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= ADL_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &adl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type adl_uncore_clockbox = {
	.name		= "clock",
	.num_counters	= 1,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= ADL_UNC_FIXED_CTR,
	.fixed_ctl	= ADL_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &adl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

static struct intel_uncore_type *adl_msr_uncores[] = {
	&adl_uncore_cbox,
	&adl_uncore_arb,
	&adl_uncore_clockbox,
	NULL,
};

void adl_uncore_cpu_init(void)
{
	adl_uncore_cbox.num_boxes = icl_get_cbox_num();
	uncore_msr_uncores = adl_msr_uncores;
}
static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"),
	INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"),

	{ /* end: all zeroes */ },
};
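
/*
 * The scale 6.103515625e-5 is 64 / 2^20: each counter increment accounts
 * for one 64-byte cache line, and the factor converts a line count into
 * MiB. With it in place, a command like (illustrative)
 *	perf stat -a -e uncore_imc/data_reads/ sleep 1
 * reports memory read volume directly in MiB.
 */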
#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE

/* BW break down- legacy counters */
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS		0x3
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE	0x5040
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS		0x4
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE	0x5044
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS		0x5
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE	0x5048
enum perf_snb_uncore_imc_freerunning_types {
	SNB_PCI_UNCORE_IMC_DATA_READS		= 0,
	SNB_PCI_UNCORE_IMC_DATA_WRITES,
	SNB_PCI_UNCORE_IMC_GT_REQUESTS,
	SNB_PCI_UNCORE_IMC_IA_REQUESTS,
	SNB_PCI_UNCORE_IMC_IO_REQUESTS,

	SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters snb_uncore_imc_freerunning[] = {
	[SNB_PCI_UNCORE_IMC_DATA_READS]		= { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_DATA_WRITES]	= { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_GT_REQUESTS]	= { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_IA_REQUESTS]	= { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_IO_REQUESTS]	= { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
};
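
/*
 * Each entry above is { counter base, counter offset, box offset, number
 * of counters, counter width in bits } per struct freerunning_counters in
 * uncore.h: one 32-bit free-running MMIO counter per event type here,
 * versus the 64-bit ones used by the TGL/ADL code later in this file.
 */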
static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr)
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);

	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}
/*
 * Keep the custom event_init() function compatible with old event
 * encoding for free running counters.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_GT_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_IA_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_IO_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/* Convert to standard encoding format for freerunning counters */
	event->hw.config = ((cfg - 1) << 8) | 0x10ff;

	/* no group validation needed, we have free running counters */

	return 0;
}
static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}
int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_dieid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}
static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * SNB IMC counters are 32-bit and are laid out back to back
	 * in MMIO space. Therefore we must use a 32-bit accessor function:
	 * using readq() via uncore_mmio_read_counter() causes problems
	 * because it reads 64 bits at a time. That is okay for
	 * uncore_perf_event_update(), which drops the upper 32 bits, but
	 * not okay for plain uncore_read_counter() as invoked from
	 * uncore_pmu_event_start().
	 */
	return (u64)readl(box->io_addr + hwc->event_base);
}
static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= uncore_pmu_event_add,
	.del		= uncore_pmu_event_del,
	.start		= uncore_pmu_event_start,
	.stop		= uncore_pmu_event_stop,
	.read		= uncore_pmu_event_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};
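
/*
 * The empty enable/disable callbacks above are deliberate: the SNB IMC
 * counters are free-running and cannot be started or stopped, so the
 * counters are instead polled via the box hrtimer (hrtimer_duration is
 * set in snb_uncore_imc_init_box()) to catch 32-bit wraparound, and the
 * IMC supplies its own struct pmu (snb_uncore_imc_pmu) with a custom
 * event_init() rather than using the common uncore PMU callbacks.
 */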
static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters   = 5,
	.num_boxes	= 1,
	.num_freerunning_types	= SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size	= SNB_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning	= snb_uncore_imc_freerunning,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};
static const struct pci_device_id snb_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(SNB),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(IVB),
	IMC_UNCORE_DEV(IVB_E3),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(HSW),
	IMC_UNCORE_DEV(HSW_U),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(BDW),
	{ /* end: all zeroes */ },
};
static const struct pci_device_id skl_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(SKL_Y),
	IMC_UNCORE_DEV(SKL_U),
	IMC_UNCORE_DEV(SKL_HD),
	IMC_UNCORE_DEV(SKL_HQ),
	IMC_UNCORE_DEV(SKL_SD),
	IMC_UNCORE_DEV(SKL_SQ),
	IMC_UNCORE_DEV(SKL_E3),
	IMC_UNCORE_DEV(KBL_Y),
	IMC_UNCORE_DEV(KBL_U),
	IMC_UNCORE_DEV(KBL_UQ),
	IMC_UNCORE_DEV(KBL_SD),
	IMC_UNCORE_DEV(KBL_SQ),
	IMC_UNCORE_DEV(KBL_HQ),
	IMC_UNCORE_DEV(KBL_WQ),
	IMC_UNCORE_DEV(CFL_2U),
	IMC_UNCORE_DEV(CFL_4U),
	IMC_UNCORE_DEV(CFL_4H),
	IMC_UNCORE_DEV(CFL_6H),
	IMC_UNCORE_DEV(CFL_2S_D),
	IMC_UNCORE_DEV(CFL_4S_D),
	IMC_UNCORE_DEV(CFL_6S_D),
	IMC_UNCORE_DEV(CFL_8S_D),
	IMC_UNCORE_DEV(CFL_4S_W),
	IMC_UNCORE_DEV(CFL_6S_W),
	IMC_UNCORE_DEV(CFL_8S_W),
	IMC_UNCORE_DEV(CFL_4S_S),
	IMC_UNCORE_DEV(CFL_6S_S),
	IMC_UNCORE_DEV(CFL_8S_S),
	IMC_UNCORE_DEV(AML_YD),
	IMC_UNCORE_DEV(AML_YQ),
	IMC_UNCORE_DEV(WHL_UQ),
	IMC_UNCORE_DEV(WHL_4_UQ),
	IMC_UNCORE_DEV(WHL_UD),
	IMC_UNCORE_DEV(CML_H1),
	IMC_UNCORE_DEV(CML_H2),
	IMC_UNCORE_DEV(CML_H3),
	IMC_UNCORE_DEV(CML_U1),
	IMC_UNCORE_DEV(CML_U2),
	IMC_UNCORE_DEV(CML_U3),
	IMC_UNCORE_DEV(CML_S1),
	IMC_UNCORE_DEV(CML_S2),
	IMC_UNCORE_DEV(CML_S3),
	IMC_UNCORE_DEV(CML_S4),
	IMC_UNCORE_DEV(CML_S5),
	{ /* end: all zeroes */ },
};
static const struct pci_device_id icl_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(ICL_U),
	IMC_UNCORE_DEV(ICL_U2),
	IMC_UNCORE_DEV(RKL_1),
	IMC_UNCORE_DEV(RKL_2),
	{ /* end: all zeroes */ },
};
static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};

static struct pci_driver icl_uncore_pci_driver = {
	.name		= "icl_uncore",
	.id_table	= icl_uncore_pci_ids,
};
struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }
static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
	IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver),  /* Xeon E3 V5 Gen Core processor */
	IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
	IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
	IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
	IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Dual Core */
	IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Quad Core */
	IMC_DEV(KBL_HQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core H Quad Core */
	IMC_DEV(KBL_WQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S 4 cores Work Station */
	IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 2 Cores */
	IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 4 Cores */
	IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 4 Cores */
	IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 6 Cores */
	IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 2 Cores Desktop */
	IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Desktop */
	IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Desktop */
	IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Desktop */
	IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Work Station */
	IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Work Station */
	IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Work Station */
	IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Server */
	IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Server */
	IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Server */
	IMC_DEV(AML_YD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Dual Core */
	IMC_DEV(AML_YQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Quad Core */
	IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Dual Core */
	IMC_DEV(CML_H1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S4_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S5_IMC, &skl_uncore_pci_driver),
	IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
	IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
	IMC_DEV(RKL_1_IMC, &icl_uncore_pci_driver),
	IMC_DEV(RKL_2_IMC, &icl_uncore_pci_driver),
	{  /* end marker */ }
};
#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)

static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}

static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}

int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
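
/*
 * All of the per-generation *_uncore_pci_init() entry points above funnel
 * into imc_uncore_pci_init(), which walks desktop_imc_pci_ids to find
 * whichever IMC host bridge is actually present and registers the
 * matching pci_driver; the per-generation split exists only to mirror
 * the dispatch table in uncore.c.
 */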
/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}
static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static const struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};
static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};
static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters   = 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */
/* Tiger Lake MMIO uncore support */

static const struct pci_device_id tgl_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(TGL_U1),
	IMC_UNCORE_DEV(TGL_U2),
	IMC_UNCORE_DEV(TGL_U3),
	IMC_UNCORE_DEV(TGL_U4),
	IMC_UNCORE_DEV(TGL_H),
	IMC_UNCORE_DEV(ADL_1),
	IMC_UNCORE_DEV(ADL_2),
	IMC_UNCORE_DEV(ADL_3),
	IMC_UNCORE_DEV(ADL_4),
	IMC_UNCORE_DEV(ADL_5),
	IMC_UNCORE_DEV(ADL_6),
	IMC_UNCORE_DEV(ADL_7),
	IMC_UNCORE_DEV(ADL_8),
	IMC_UNCORE_DEV(ADL_9),
	IMC_UNCORE_DEV(ADL_10),
	IMC_UNCORE_DEV(ADL_11),
	IMC_UNCORE_DEV(ADL_12),
	IMC_UNCORE_DEV(ADL_13),
	IMC_UNCORE_DEV(ADL_14),
	IMC_UNCORE_DEV(ADL_15),
	IMC_UNCORE_DEV(ADL_16),
	IMC_UNCORE_DEV(ADL_17),
	IMC_UNCORE_DEV(ADL_18),
	IMC_UNCORE_DEV(ADL_19),
	IMC_UNCORE_DEV(ADL_20),
	IMC_UNCORE_DEV(ADL_21),
	IMC_UNCORE_DEV(RPL_1),
	IMC_UNCORE_DEV(RPL_2),
	IMC_UNCORE_DEV(RPL_3),
	IMC_UNCORE_DEV(RPL_4),
	IMC_UNCORE_DEV(RPL_5),
	IMC_UNCORE_DEV(RPL_6),
	IMC_UNCORE_DEV(RPL_7),
	IMC_UNCORE_DEV(RPL_8),
	IMC_UNCORE_DEV(RPL_9),
	IMC_UNCORE_DEV(RPL_10),
	IMC_UNCORE_DEV(RPL_11),
	IMC_UNCORE_DEV(RPL_12),
	IMC_UNCORE_DEV(RPL_13),
	IMC_UNCORE_DEV(RPL_14),
	IMC_UNCORE_DEV(RPL_15),
	IMC_UNCORE_DEV(RPL_16),
	IMC_UNCORE_DEV(RPL_17),
	IMC_UNCORE_DEV(RPL_18),
	IMC_UNCORE_DEV(RPL_19),
	IMC_UNCORE_DEV(RPL_20),
	IMC_UNCORE_DEV(RPL_21),
	IMC_UNCORE_DEV(RPL_22),
	IMC_UNCORE_DEV(RPL_23),
	IMC_UNCORE_DEV(RPL_24),
	IMC_UNCORE_DEV(RPL_25),
	{ /* end: all zeroes */ }
};
enum perf_tgl_uncore_imc_freerunning_types {
	TGL_MMIO_UNCORE_IMC_DATA_TOTAL,
	TGL_MMIO_UNCORE_IMC_DATA_READ,
	TGL_MMIO_UNCORE_IMC_DATA_WRITE,
	TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};

static struct freerunning_counters tgl_l_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0x5040, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0x5058, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0x50A0, 0x0, 0x0, 1, 64 },
};

static struct freerunning_counters tgl_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0xd840, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0xd858, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0xd8A0, 0x0, 0x0, 1, 64 },
};
static struct uncore_event_desc tgl_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_total,         "event=0xff,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(data_total.scale,   "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_total.unit,    "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_read,          "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(data_read.scale,    "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_read.unit,     "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_write,         "event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(data_write.scale,   "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_write.unit,    "MiB"),

	{ /* end: all zeroes */ }
};
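
/*
 * Unlike the legacy SNB IMC events earlier in this file, these use the
 * generic free-running encoding: event 0xff with the umask selecting the
 * counter type (0x10 total, 0x20 read, 0x30 write), so no custom
 * event_init() is needed here.
 */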
static struct pci_dev *tgl_uncore_get_mc_dev(void)
{
	const struct pci_device_id *ids = tgl_uncore_pci_ids;
	struct pci_dev *mc_dev = NULL;

	while (ids && ids->vendor) {
		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, ids->device, NULL);
		if (mc_dev)
			return mc_dev;
		ids++;
	}

	return mc_dev;
}
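
/*
 * Note that there is no PCI driver bound here: the TGL/ADL free-running
 * counters live in MCHBAR MMIO space, so the memory controller PCI device
 * found above is only used by __uncore_imc_init_box() to read the BAR out
 * of config space before mapping the counters.
 */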
#define TGL_UNCORE_MMIO_IMC_MEM_OFFSET		0x10000
#define TGL_UNCORE_PCI_IMC_MAP_SIZE		0xe000

static void __uncore_imc_init_box(struct intel_uncore_box *box,
				  unsigned int base_offset)
{
	struct pci_dev *pdev = tgl_uncore_get_mc_dev();
	struct intel_uncore_pmu *pmu = box->pmu;
	struct intel_uncore_type *type = pmu->type;
	resource_size_t addr;
	u32 mch_bar;

	if (!pdev) {
		pr_warn("perf uncore: Cannot find matched IMC device.\n");
		return;
	}

	pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET, &mch_bar);
	/* MCHBAR is disabled */
	if (!(mch_bar & BIT(0))) {
		pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n");
		pci_dev_put(pdev);
		return;
	}
	mch_bar &= ~BIT(0);
	addr = (resource_size_t)(mch_bar + TGL_UNCORE_MMIO_IMC_MEM_OFFSET * pmu->pmu_idx);

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET + 4, &mch_bar);
	addr |= ((resource_size_t)mch_bar << 32);
#endif

	addr += base_offset;
	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr)
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);

	pci_dev_put(pdev);
}
static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, 0);
}

static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
	.init_box	= tgl_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct attribute *tgl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static const struct attribute_group tgl_uncore_imc_format_group = {
	.name = "format",
	.attrs = tgl_uncore_imc_formats_attr,
};

static struct intel_uncore_type tgl_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 2,
	.num_freerunning_types	= TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= TGL_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning		= tgl_uncore_imc_freerunning,
	.ops			= &tgl_uncore_imc_freerunning_ops,
	.event_descs		= tgl_uncore_imc_events,
	.format_group		= &tgl_uncore_imc_format_group,
};

static struct intel_uncore_type *tgl_mmio_uncores[] = {
	&tgl_uncore_imc_free_running,
	NULL,
};

void tgl_l_uncore_mmio_init(void)
{
	tgl_uncore_imc_free_running.freerunning = tgl_l_uncore_imc_freerunning;
	uncore_mmio_uncores = tgl_mmio_uncores;
}

void tgl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = tgl_mmio_uncores;
}

/* end of Tiger Lake MMIO uncore support */
/* Alder Lake MMIO uncore support */
#define ADL_UNCORE_IMC_BASE			0xd900
#define ADL_UNCORE_IMC_MAP_SIZE			0x200
#define ADL_UNCORE_IMC_CTR			0xe8
#define ADL_UNCORE_IMC_CTRL			0xd0
#define ADL_UNCORE_IMC_GLOBAL_CTL		0xc0
#define ADL_UNCORE_IMC_BOX_CTL			0xc4
#define ADL_UNCORE_IMC_FREERUNNING_BASE		0xd800
#define ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE	0x100

#define ADL_UNCORE_IMC_CTL_FRZ			(1 << 0)
#define ADL_UNCORE_IMC_CTL_RST_CTRL		(1 << 1)
#define ADL_UNCORE_IMC_CTL_RST_CTRS		(1 << 2)
#define ADL_UNCORE_IMC_CTL_INT			(ADL_UNCORE_IMC_CTL_RST_CTRL | \
						ADL_UNCORE_IMC_CTL_RST_CTRS)
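
/*
 * Box control bit semantics, as inferred from their use below: FRZ
 * freezes counting while a box is disabled, and the two RST bits clear
 * the control registers and counters when a box is initialized (the
 * combined ADL_UNCORE_IMC_CTL_INT value is written once at init time).
 */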
static void adl_uncore_imc_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, ADL_UNCORE_IMC_BASE);

	/* The global control in MC1 can control both MCs. */
	if (box->io_addr && (box->pmu->pmu_idx == 1))
		writel(ADL_UNCORE_IMC_CTL_INT, box->io_addr + ADL_UNCORE_IMC_GLOBAL_CTL);
}

static void adl_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(ADL_UNCORE_IMC_CTL_FRZ, box->io_addr + uncore_mmio_box_ctl(box));
}

static void adl_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(0, box->io_addr + uncore_mmio_box_ctl(box));
}

static struct intel_uncore_ops adl_uncore_mmio_ops = {
	.init_box	= adl_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= adl_uncore_mmio_disable_box,
	.enable_box	= adl_uncore_mmio_enable_box,
	.disable_event	= intel_generic_uncore_mmio_disable_event,
	.enable_event	= intel_generic_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};
#define ADL_UNC_CTL_CHMASK_MASK		0x00000f00
#define ADL_UNC_IMC_EVENT_MASK		(SNB_UNC_CTL_EV_SEL_MASK | \
					 ADL_UNC_CTL_CHMASK_MASK | \
					 SNB_UNC_CTL_EDGE_DET)

static struct attribute *adl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_chmask.attr,
	&format_attr_edge.attr,
	NULL,
};

static const struct attribute_group adl_uncore_imc_format_group = {
	.name		= "format",
	.attrs		= adl_uncore_imc_formats_attr,
};
static struct intel_uncore_type adl_uncore_imc = {
	.name		= "imc",
	.num_counters   = 5,
	.num_boxes	= 2,
	.perf_ctr_bits	= 64,
	.perf_ctr	= ADL_UNCORE_IMC_CTR,
	.event_ctl	= ADL_UNCORE_IMC_CTRL,
	.event_mask	= ADL_UNC_IMC_EVENT_MASK,
	.box_ctl	= ADL_UNCORE_IMC_BOX_CTL,
	.mmio_offset	= 0,
	.mmio_map_size	= ADL_UNCORE_IMC_MAP_SIZE,
	.ops		= &adl_uncore_mmio_ops,
	.format_group	= &adl_uncore_imc_format_group,
};
enum perf_adl_uncore_imc_freerunning_types {
	ADL_MMIO_UNCORE_IMC_DATA_TOTAL,
	ADL_MMIO_UNCORE_IMC_DATA_READ,
	ADL_MMIO_UNCORE_IMC_DATA_WRITE,
	ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};

static struct freerunning_counters adl_uncore_imc_freerunning[] = {
	[ADL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0x40, 0x0, 0x0, 1, 64 },
	[ADL_MMIO_UNCORE_IMC_DATA_READ]		= { 0x58, 0x0, 0x0, 1, 64 },
	[ADL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0xA0, 0x0, 0x0, 1, 64 },
};
static void adl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, ADL_UNCORE_IMC_FREERUNNING_BASE);
}

static struct intel_uncore_ops adl_uncore_imc_freerunning_ops = {
	.init_box	= adl_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type adl_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 2,
	.num_freerunning_types	= ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE,
	.freerunning		= adl_uncore_imc_freerunning,
	.ops			= &adl_uncore_imc_freerunning_ops,
	.event_descs		= tgl_uncore_imc_events,
	.format_group		= &tgl_uncore_imc_format_group,
};

static struct intel_uncore_type *adl_mmio_uncores[] = {
	&adl_uncore_imc,
	&adl_uncore_imc_free_running,
	NULL,
};

void adl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = adl_mmio_uncores;
}

/* end of Alder Lake MMIO uncore support */