// SPDX-License-Identifier: GPL-2.0
/* Nehalem/Sandy Bridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC		0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC		0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC		0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC		0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC		0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC		0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC		0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC		0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC		0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC		0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC		0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC		0x191f
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC		0x590c
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC		0x5904
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC		0x5914
#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC		0x590f
#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC		0x591f
#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC		0x3ecc
#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC		0x3ed0
#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC		0x3e10
#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC		0x3ec4
#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC	0x3e0f
#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC	0x3e1f
#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC	0x3ec2
#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC	0x3e30
#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC	0x3e18
#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC	0x3ec6
#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC	0x3e31
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC	0x3e33
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC	0x3eca
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC	0x3e32

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL			0x391
#define SNB_UNC_FIXED_CTR_CTRL			0x394
#define SNB_UNC_FIXED_CTR			0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL		((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN			(1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0		0x700
#define SNB_UNC_CBO_0_PER_CTR0			0x706
#define SNB_UNC_CBO_MSR_OFFSET			0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0			0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0			0x3b2
#define SNB_UNC_ARB_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL			0x391
#define NHM_UNC_FIXED_CTR			0x394
#define NHM_UNC_FIXED_CTR_CTRL			0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL		((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC		(1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0			0x3c0
#define NHM_UNC_UNCORE_PMC0			0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL			0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL		((1 << 5) - 1)

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");

/* Sandy Bridge uncore support */
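/*
 * Per-counter enable: a general-purpose C-Box/ARB counter takes the full
 * event encoding plus SNB_UNC_CTL_EN in its PERFEVTSEL MSR, while the
 * fixed (clockticks) counter control carries only the enable bit.
 */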
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

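/*
 * The global enable in SNB_UNC_PERF_GLOBAL_CTL covers all boxes, so only the
 * first PMU instance (pmu_idx 0) programs it at init/exit time; enable_box
 * rewrites it unconditionally whenever a box is (re)enabled.
 */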
static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static const struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.enable_box	= snb_uncore_msr_enable_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

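/* ARB events 0x80 and 0x83 are constrained to the first counter only. */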
static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters	= 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

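/*
 * num_boxes starts at the maximum C-Box count for these parts and is
 * clamped to the core count of the booted CPU below.
 */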
void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

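/*
 * Skylake client and its derivatives (KBL/CFL, see the PCI ID tables below)
 * reuse the SNB C-Box counter layout but move the global control to
 * SKL_UNC_PERF_GLOBAL_CTL (0xe01) and enable up to five cores' C-Boxes.
 */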
static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}

	/* The 8th CBOX has different MSR space */
	if (box->pmu->pmu_idx == 7)
		__set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.enable_box	= skl_uncore_msr_enable_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters	= 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

enum {
	SNB_PCI_UNCORE_IMC,
};

static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	{ /* end: all zeroes */ },
};

#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE

enum perf_snb_uncore_imc_freerunning_types {
	SNB_PCI_UNCORE_IMC_DATA		= 0,
	SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};

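/*
 * Free-running counter description for the IMC data counters: the
 * initializer encodes base 0x5050 with consecutive counters 0x4 apart
 * (reads at 0x5050, writes at 0x5054), no per-box offset, two counters,
 * each 32 bits wide.
 */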
static struct freerunning_counters snb_uncore_imc_freerunning[] = {
	[SNB_PCI_UNCORE_IMC_DATA]	= { SNB_UNCORE_PCI_IMC_DATA_READS_BASE, 0x4, 0x0, 2, 32 },
};

static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};

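/*
 * The IMC counters live in MMIO space: init_box reads the (possibly 64-bit)
 * BAR from PCI config offset 0x48, page-aligns it and ioremaps a window
 * large enough to cover all counter registers.
 */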
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
{
	iounmap(box->io_addr);
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

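/*
 * Reading a free-running IMC counter is a plain 32-bit load from the mapped
 * MMIO window at the event's base offset; there is nothing to enable or
 * disable, hence the empty callbacks above.
 */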
static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	return (u64)*(unsigned int *)(box->io_addr + hwc->event_base);
}

/*
 * Keep the custom event_init() function compatible with old event
 * encoding for free running counters.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.config = cfg;
	event->hw.idx = idx;

	/* no group validation needed, we have free running counters */

	return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}

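/*
 * These client parts have a single memory controller; map the bus that
 * hosts the IMC PCI device to physical package 0 so the generic uncore
 * code can find it.
 */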
int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_physid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}

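/*
 * The IMC gets its own struct pmu: event_init() is the custom whitelist
 * above, while add/del/start/stop/read fall through to the generic uncore
 * helpers; the short hrtimer_duration set in init_box keeps the 32-bit
 * free-running counters polled often enough not to wrap unnoticed.
 */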
static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= uncore_pmu_event_add,
	.del		= uncore_pmu_event_del,
	.start		= uncore_pmu_event_start,
	.stop		= uncore_pmu_event_stop,
	.read		= uncore_pmu_event_read,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= snb_uncore_imc_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= snb_uncore_imc_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters	= 2,
	.num_boxes	= 1,
	.num_freerunning_types	= SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.freerunning	= snb_uncore_imc_freerunning,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SNB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_E3_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* IMC */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
	{ /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
	IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
	IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
	IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */
	IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Dual Core */
	IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Quad Core */
	IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 2 Cores */
	IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 4 Cores */
	IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 4 Cores */
	IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 6 Cores */
	IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 2 Cores Desktop */
	IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Desktop */
	IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Desktop */
	IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Desktop */
	IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Work Station */
	IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Work Station */
	IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Work Station */
	IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */
	IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */
	IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */
	{ /* end marker */ }
};

#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)

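/*
 * Probe the table above in order and return the driver for the first IMC
 * device that is actually present; snb_pci2phy_map_init() only returns 0
 * when a PCI device with that ID exists.
 */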
static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}

static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}

int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
797 | /* Nehalem uncore support */ | |
798 | static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box) | |
799 | { | |
800 | wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0); | |
801 | } | |
802 | ||
803 | static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box) | |
804 | { | |
805 | wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC); | |
806 | } | |
807 | ||
808 | static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event) | |
809 | { | |
810 | struct hw_perf_event *hwc = &event->hw; | |
811 | ||
812 | if (hwc->idx < UNCORE_PMC_IDX_FIXED) | |
813 | wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN); | |
814 | else | |
815 | wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN); | |
816 | } | |
817 | ||
818 | static struct attribute *nhm_uncore_formats_attr[] = { | |
819 | &format_attr_event.attr, | |
820 | &format_attr_umask.attr, | |
821 | &format_attr_edge.attr, | |
822 | &format_attr_inv.attr, | |
823 | &format_attr_cmask8.attr, | |
824 | NULL, | |
825 | }; | |
826 | ||
45bd07ad | 827 | static const struct attribute_group nhm_uncore_format_group = { |
92807ffd YZ |
828 | .name = "format", |
829 | .attrs = nhm_uncore_formats_attr, | |
830 | }; | |
831 | ||
832 | static struct uncore_event_desc nhm_uncore_events[] = { | |
833 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), | |
834 | INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"), | |
835 | INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"), | |
836 | INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"), | |
837 | INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"), | |
838 | INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"), | |
839 | INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"), | |
840 | INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"), | |
841 | INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"), | |
842 | { /* end: all zeroes */ }, | |
843 | }; | |
844 | ||
845 | static struct intel_uncore_ops nhm_uncore_msr_ops = { | |
846 | .disable_box = nhm_uncore_msr_disable_box, | |
847 | .enable_box = nhm_uncore_msr_enable_box, | |
848 | .disable_event = snb_uncore_msr_disable_event, | |
849 | .enable_event = nhm_uncore_msr_enable_event, | |
850 | .read_counter = uncore_msr_read_counter, | |
851 | }; | |
852 | ||
853 | static struct intel_uncore_type nhm_uncore = { | |
854 | .name = "", | |
855 | .num_counters = 8, | |
856 | .num_boxes = 1, | |
857 | .perf_ctr_bits = 48, | |
858 | .fixed_ctr_bits = 48, | |
859 | .event_ctl = NHM_UNC_PERFEVTSEL0, | |
860 | .perf_ctr = NHM_UNC_UNCORE_PMC0, | |
861 | .fixed_ctr = NHM_UNC_FIXED_CTR, | |
862 | .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL, | |
863 | .event_mask = NHM_UNC_RAW_EVENT_MASK, | |
864 | .event_descs = nhm_uncore_events, | |
865 | .ops = &nhm_uncore_msr_ops, | |
866 | .format_group = &nhm_uncore_format_group, | |
867 | }; | |
868 | ||
869 | static struct intel_uncore_type *nhm_msr_uncores[] = { | |
870 | &nhm_uncore, | |
871 | NULL, | |
872 | }; | |
873 | ||
874 | void nhm_uncore_cpu_init(void) | |
875 | { | |
876 | uncore_msr_uncores = nhm_msr_uncores; | |
877 | } | |
878 | ||
879 | /* end of Nehalem uncore support */ |