/*
 * EDAC driver for Intel(R) Xeon(R) Skylake processors
 * Copyright (c) 2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <linux/adxl.h>
#include <acpi/nfit.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>

#include "edac_module.h"

#define EDAC_MOD_STR	"skx_edac"
#define MSG_SIZE	1024

/*
 * Debug macros
 */
#define skx_printk(level, fmt, arg...) \
	edac_printk(level, "skx", fmt, ##arg)

#define skx_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg)

/*
 * Get a bit field at register value <v>, from bit <lo> to bit <hi>
 */
#define GET_BITFIELD(v, lo, hi) \
	(((v) & GENMASK_ULL((hi), (lo))) >> (lo))
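
/*
 * Illustrative example (added for clarity, not in the original source):
 * GET_BITFIELD(0x12345678, 4, 7) masks bits 7..4 (giving 0x70) and
 * shifts right by 4, yielding 0x7.
 */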

static LIST_HEAD(skx_edac_list);

static u64 skx_tolm, skx_tohm;
static char *skx_msg;
static unsigned int nvdimm_count;

enum {
	INDEX_SOCKET,
	INDEX_MEMCTRL,
	INDEX_CHANNEL,
	INDEX_DIMM,
	INDEX_MAX
};

static const char * const component_names[] = {
	[INDEX_SOCKET]	= "ProcessorSocketId",
	[INDEX_MEMCTRL]	= "MemoryControllerId",
	[INDEX_CHANNEL]	= "ChannelId",
	[INDEX_DIMM]	= "DimmSlotId",
};

static int component_indices[ARRAY_SIZE(component_names)];
static int adxl_component_count;
static const char * const *adxl_component_names;
static u64 *adxl_values;
static char *adxl_msg;

#define NUM_IMC		2	/* memory controllers per socket */
#define NUM_CHANNELS	3	/* channels per memory controller */
#define NUM_DIMMS	2	/* max DIMMs per channel */

#define MASK26	0x3FFFFFF	/* Low 26 bits set (2^26 - 1) */
#define MASK29	0x1FFFFFFF	/* Low 29 bits set (2^29 - 1) */

/*
 * Each cpu socket contains some pci devices that provide global
 * information, and also some that are local to each of the two
 * memory controllers on the die.
 */
struct skx_dev {
	struct list_head list;
	u8 bus[4];
	int seg;
	struct pci_dev *sad_all;
	struct pci_dev *util_all;
	u32 mcroute;
	struct skx_imc {
		struct mem_ctl_info *mci;
		u8 mc;	/* system wide mc# */
		u8 lmc;	/* socket relative mc# */
		u8 src_id, node_id;
		struct skx_channel {
			struct pci_dev *cdev;
			struct skx_dimm {
				u8 close_pg;
				u8 bank_xor_enable;
				u8 fine_grain_bank;
				u8 rowbits;
				u8 colbits;
			} dimms[NUM_DIMMS];
		} chan[NUM_CHANNELS];
	} imc[NUM_IMC];
};
static int skx_num_sockets;

struct skx_pvt {
	struct skx_imc *imc;
};

struct decoded_addr {
	struct skx_dev *dev;
	u64 addr;
	int socket;
	int imc;
	int channel;
	u64 chan_addr;
	int sktways;
	int chanways;
	int dimm;
	int rank;
	int channel_rank;
	u64 rank_address;
	int row;
	int column;
	int bank_address;
	int bank_group;
};

static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx)
{
	struct skx_dev *d;

	list_for_each_entry(d, &skx_edac_list, list) {
		if (d->seg == pci_domain_nr(bus) && d->bus[idx] == bus->number)
			return d;
	}

	return NULL;
}

enum munittype {
	CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD
};

struct munit {
	u16 did;
	u16 devfn[NUM_IMC];
	u8 busidx;
	u8 per_socket;
	enum munittype mtype;
};

/*
 * List of PCI device ids that we need, together with the device and
 * function numbers that tell us which memory controller each device
 * belongs to.
 */
static const struct munit skx_all_munits[] = {
	{ 0x2054, { }, 1, 1, SAD_ALL },
	{ 0x2055, { }, 1, 1, UTIL_ALL },
	{ 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 },
	{ 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 },
	{ 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 },
	{ 0x208e, { }, 1, 0, SAD },
	{ }
};
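
/*
 * Note (illustrative, not in the original source): PCI_DEVFN(slot, func)
 * packs a device/function pair as ((slot & 0x1f) << 3) | (func & 0x07),
 * so PCI_DEVFN(10, 0) above matches devfn 0x50: device 10, function 0.
 */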

/*
 * We use the per-socket device 0x2016 to count how many sockets are present,
 * and to determine which PCI buses are associated with each socket. Allocate
 * and build the full list of all the skx_dev structures that we need here.
 */
static int get_all_bus_mappings(void)
{
	struct pci_dev *pdev, *prev;
	struct skx_dev *d;
	u32 reg;
	int ndev = 0;

	prev = NULL;
	for (;;) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2016, prev);
		if (!pdev)
			break;
		ndev++;
		d = kzalloc(sizeof(*d), GFP_KERNEL);
		if (!d) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
		d->seg = pci_domain_nr(pdev->bus);
		pci_read_config_dword(pdev, 0xCC, &reg);
		d->bus[0] = GET_BITFIELD(reg, 0, 7);
		d->bus[1] = GET_BITFIELD(reg, 8, 15);
		d->bus[2] = GET_BITFIELD(reg, 16, 23);
		d->bus[3] = GET_BITFIELD(reg, 24, 31);
		edac_dbg(2, "busses: %x, %x, %x, %x\n",
			 d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
		list_add_tail(&d->list, &skx_edac_list);
		skx_num_sockets++;
		prev = pdev;
	}

	return ndev;
}

static int get_all_munits(const struct munit *m)
{
	struct pci_dev *pdev, *prev;
	struct skx_dev *d;
	u32 reg;
	int i = 0, ndev = 0;

	prev = NULL;
	for (;;) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, m->did, prev);
		if (!pdev)
			break;
		ndev++;
		if (m->per_socket == NUM_IMC) {
			for (i = 0; i < NUM_IMC; i++)
				if (m->devfn[i] == pdev->devfn)
					break;
			if (i == NUM_IMC)
				goto fail;
		}
		d = get_skx_dev(pdev->bus, m->busidx);
		if (!d)
			goto fail;

		/* Be sure that the device is enabled */
		if (unlikely(pci_enable_device(pdev) < 0)) {
			skx_printk(KERN_ERR,
				   "Couldn't enable %04x:%04x\n", PCI_VENDOR_ID_INTEL, m->did);
			goto fail;
		}

		switch (m->mtype) {
		case CHAN0: case CHAN1: case CHAN2:
			pci_dev_get(pdev);
			d->imc[i].chan[m->mtype].cdev = pdev;
			break;
		case SAD_ALL:
			pci_dev_get(pdev);
			d->sad_all = pdev;
			break;
		case UTIL_ALL:
			pci_dev_get(pdev);
			d->util_all = pdev;
			break;
		case SAD:
			/*
			 * There is one of these devices per core, including
			 * cores that don't exist on this SKU. Ignore any that
			 * read a route table of zero; make sure all the
			 * non-zero values match.
			 */
			pci_read_config_dword(pdev, 0xB4, &reg);
			if (reg != 0) {
				if (d->mcroute == 0)
					d->mcroute = reg;
				else if (d->mcroute != reg) {
					skx_printk(KERN_ERR,
						   "mcroute mismatch\n");
					goto fail;
				}
			}
			ndev--;
			break;
		}

		prev = pdev;
	}

	return ndev;
fail:
	pci_dev_put(pdev);
	return -ENODEV;
}

static const struct x86_cpu_id skx_cpuids[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);

static u8 get_src_id(struct skx_dev *d)
{
	u32 reg;

	pci_read_config_dword(d->util_all, 0xF0, &reg);

	return GET_BITFIELD(reg, 12, 14);
}

static u8 skx_get_node_id(struct skx_dev *d)
{
	u32 reg;

	pci_read_config_dword(d->util_all, 0xF4, &reg);

	return GET_BITFIELD(reg, 0, 2);
}

static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval,
			 int maxval, char *name)
{
	u32 val = GET_BITFIELD(reg, lobit, hibit);

	if (val < minval || val > maxval) {
		edac_dbg(2, "bad %s = %d (raw=%x)\n", name, val, reg);
		return -EINVAL;
	}
	return val + add;
}

#define IS_DIMM_PRESENT(mtr)		GET_BITFIELD((mtr), 15, 15)
#define IS_NVDIMM_PRESENT(mcddrtcfg, i)	GET_BITFIELD((mcddrtcfg), (i), (i))

#define numrank(reg)	get_dimm_attr((reg), 12, 13, 0, 0, 2, "ranks")
#define numrow(reg)	get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows")
#define numcol(reg)	get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols")
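
/*
 * Worked example (added for clarity, not in the original source): a raw
 * MTR "rows" field of 4 decodes via numrow() to 4 + 12 = 16 row bits
 * (valid encodings 1-6 map to 13-18 row bits), and numcol() maps the
 * 0-2 column encodings to 10-12 column bits.
 */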

static int get_width(u32 mtr)
{
	switch (GET_BITFIELD(mtr, 8, 9)) {
	case 0:
		return DEV_X4;
	case 1:
		return DEV_X8;
	case 2:
		return DEV_X16;
	}
	return DEV_UNKNOWN;
}

static int skx_get_hi_lo(void)
{
	struct pci_dev *pdev;
	u32 reg;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2034, NULL);
	if (!pdev) {
		edac_dbg(0, "Can't get tolm/tohm\n");
		return -ENODEV;
	}

	pci_read_config_dword(pdev, 0xD0, &reg);
	skx_tolm = reg;
	pci_read_config_dword(pdev, 0xD4, &reg);
	skx_tohm = reg;
	pci_read_config_dword(pdev, 0xD8, &reg);
	skx_tohm |= (u64)reg << 32;

	pci_dev_put(pdev);
	edac_dbg(2, "tolm=%llx tohm=%llx\n", skx_tolm, skx_tohm);

	return 0;
}

static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
			 struct skx_imc *imc, int chan, int dimmno)
{
	int banks = 16, ranks, rows, cols, npages;
	u64 size;

	ranks = numrank(mtr);
	rows = numrow(mtr);
	cols = numcol(mtr);

	/*
	 * Compute size in 8-byte (2^3) words, then shift to MiB (2^20)
	 */
	size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
	npages = MiB_TO_PAGES(size);
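	/*
	 * Illustrative example (not in the original source): a dual-rank
	 * DIMM (ranks = 1, i.e. 2^1 ranks) with 16 row bits and 10 column
	 * bits gives (1 << (16 + 10 + 1)) * 16 eight-byte words = 16 GiB,
	 * so size = 16384 MiB.
	 */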

	edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
		 imc->mc, chan, dimmno, size, npages,
		 banks, 1 << ranks, rows, cols);

	imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
	imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
	imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0);
	imc->chan[chan].dimms[dimmno].rowbits = rows;
	imc->chan[chan].dimms[dimmno].colbits = cols;

	dimm->nr_pages = npages;
	dimm->grain = 32;
	dimm->dtype = get_width(mtr);
	dimm->mtype = MEM_DDR4;
	dimm->edac_mode = EDAC_SECDED; /* likely better than this */
	snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
		 imc->src_id, imc->lmc, chan, dimmno);

	return 1;
}

static int get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
			   int chan, int dimmno)
{
	int smbios_handle;
	u32 dev_handle;
	u16 flags;
	u64 size = 0;

	nvdimm_count++;

	dev_handle = ACPI_NFIT_BUILD_DEVICE_HANDLE(dimmno, chan, imc->lmc,
						   imc->src_id, 0);

	smbios_handle = nfit_get_smbios_id(dev_handle, &flags);
	if (smbios_handle == -EOPNOTSUPP) {
		pr_warn_once(EDAC_MOD_STR ": Can't find size of NVDIMM. Try enabling CONFIG_ACPI_NFIT\n");
		goto unknown_size;
	}

	if (smbios_handle < 0) {
		skx_printk(KERN_ERR, "Can't find handle for NVDIMM ADR=%x\n", dev_handle);
		goto unknown_size;
	}

	if (flags & ACPI_NFIT_MEM_MAP_FAILED) {
		skx_printk(KERN_ERR, "NVDIMM ADR=%x is not mapped\n", dev_handle);
		goto unknown_size;
	}

	size = dmi_memdev_size(smbios_handle);
	if (size == ~0ull)
		skx_printk(KERN_ERR, "Can't find size for NVDIMM ADR=%x/SMBIOS=%x\n",
			   dev_handle, smbios_handle);

unknown_size:
	dimm->nr_pages = size >> PAGE_SHIFT;
	dimm->grain = 32;
	dimm->dtype = DEV_UNKNOWN;
	dimm->mtype = MEM_NVDIMM;
	dimm->edac_mode = EDAC_SECDED; /* likely better than this */

	edac_dbg(0, "mc#%d: channel %d, dimm %d, %llu MiB (%u pages)\n",
		 imc->mc, chan, dimmno, size >> 20, dimm->nr_pages);

	snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
		 imc->src_id, imc->lmc, chan, dimmno);

	return (size == 0 || size == ~0ull) ? 0 : 1;
}

#define SKX_GET_MTMTR(dev, reg) \
	pci_read_config_dword((dev), 0x87c, &reg)

static bool skx_check_ecc(struct pci_dev *pdev)
{
	u32 mtmtr;

	SKX_GET_MTMTR(pdev, mtmtr);

	return !!GET_BITFIELD(mtmtr, 2, 2);
}

static int skx_get_dimm_config(struct mem_ctl_info *mci)
{
	struct skx_pvt *pvt = mci->pvt_info;
	struct skx_imc *imc = pvt->imc;
	u32 mtr, amap, mcddrtcfg;
	struct dimm_info *dimm;
	int i, j;
	int ndimms;

	for (i = 0; i < NUM_CHANNELS; i++) {
		ndimms = 0;
		pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
		pci_read_config_dword(imc->chan[i].cdev, 0x400, &mcddrtcfg);
		for (j = 0; j < NUM_DIMMS; j++) {
			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
					     mci->n_layers, i, j, 0);
			pci_read_config_dword(imc->chan[i].cdev,
					      0x80 + 4 * j, &mtr);
			if (IS_DIMM_PRESENT(mtr))
				ndimms += get_dimm_info(mtr, amap, dimm, imc, i, j);
			else if (IS_NVDIMM_PRESENT(mcddrtcfg, j))
				ndimms += get_nvdimm_info(dimm, imc, i, j);
		}
		if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) {
			skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc);
			return -ENODEV;
		}
	}

	return 0;
}

static void skx_unregister_mci(struct skx_imc *imc)
{
	struct mem_ctl_info *mci = imc->mci;

	if (!mci)
		return;

	edac_dbg(0, "MC%d: mci = %p\n", imc->mc, mci);

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(mci->pdev);

	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	kfree(mci->ctl_name);
	edac_mc_free(mci);
}

static int skx_register_mci(struct skx_imc *imc)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct pci_dev *pdev = imc->chan[0].cdev;
	struct skx_pvt *pvt;
	int rc;

	/* allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = NUM_CHANNELS;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = NUM_DIMMS;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers,
			    sizeof(struct skx_pvt));

	if (unlikely(!mci))
		return -ENOMEM;

	edac_dbg(0, "MC#%d: mci = %p\n", imc->mc, mci);

	/* Associate skx_dev and mci for future usage */
	imc->mci = mci;
	pvt = mci->pvt_info;
	pvt->imc = imc;

	mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d",
				  imc->node_id, imc->lmc);
	if (!mci->ctl_name) {
		rc = -ENOMEM;
		goto fail0;
	}

	mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_NVDIMM;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = EDAC_MOD_STR;
	mci->dev_name = pci_name(imc->chan[0].cdev);
	mci->ctl_page_to_phys = NULL;

	rc = skx_get_dimm_config(mci);
	if (rc < 0)
		goto fail;

	/* record ptr to the generic device */
	mci->pdev = &pdev->dev;

	/* add this new MC control structure to EDAC's list of MCs */
	if (unlikely(edac_mc_add_mc(mci))) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		rc = -EINVAL;
		goto fail;
	}

	return 0;

fail:
	kfree(mci->ctl_name);
fail0:
	edac_mc_free(mci);
	imc->mci = NULL;
	return rc;
}

#define SKX_MAX_SAD 24

#define SKX_GET_SAD(d, i, reg) \
	pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), &reg)
#define SKX_GET_ILV(d, i, reg) \
	pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), &reg)

#define SKX_SAD_MOD3MODE(sad)	GET_BITFIELD((sad), 30, 31)
#define SKX_SAD_MOD3(sad)	GET_BITFIELD((sad), 27, 27)
#define SKX_SAD_LIMIT(sad)	(((u64)GET_BITFIELD((sad), 7, 26) << 26) | MASK26)
#define SKX_SAD_MOD3ASMOD2(sad)	GET_BITFIELD((sad), 5, 6)
#define SKX_SAD_ATTR(sad)	GET_BITFIELD((sad), 3, 4)
#define SKX_SAD_INTERLEAVE(sad)	GET_BITFIELD((sad), 1, 2)
#define SKX_SAD_ENABLE(sad)	GET_BITFIELD((sad), 0, 0)

#define SKX_ILV_REMOTE(tgt)	(((tgt) & 8) == 0)
#define SKX_ILV_TARGET(tgt)	((tgt) & 7)
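
/*
 * Illustrative example (not in the original source): SAD limits are in
 * units of 64 MiB (2^26 bytes). A raw limit field of 0x3f decodes via
 * SKX_SAD_LIMIT() to (0x3f << 26) | MASK26 = 0xFFFFFFFF, i.e. the entry
 * covers addresses up to 4 GiB - 1.
 */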

static bool skx_sad_decode(struct decoded_addr *res)
{
	struct skx_dev *d = list_first_entry(&skx_edac_list, typeof(*d), list);
	u64 addr = res->addr;
	int i, idx, tgt, lchan, shift;
	u32 sad, ilv;
	u64 limit, prev_limit;
	int remote = 0;

	/* Simple sanity check for I/O space or out of range */
	if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) {
		edac_dbg(0, "Address %llx out of range\n", addr);
		return false;
	}

restart:
	prev_limit = 0;
	for (i = 0; i < SKX_MAX_SAD; i++) {
		SKX_GET_SAD(d, i, sad);
		limit = SKX_SAD_LIMIT(sad);
		if (SKX_SAD_ENABLE(sad)) {
			if (addr >= prev_limit && addr <= limit)
				goto sad_found;
		}
		prev_limit = limit + 1;
	}
	edac_dbg(0, "No SAD entry for %llx\n", addr);
	return false;

sad_found:
	SKX_GET_ILV(d, i, ilv);

	switch (SKX_SAD_INTERLEAVE(sad)) {
	case 0:
		idx = GET_BITFIELD(addr, 6, 8);
		break;
	case 1:
		idx = GET_BITFIELD(addr, 8, 10);
		break;
	case 2:
		idx = GET_BITFIELD(addr, 12, 14);
		break;
	case 3:
		idx = GET_BITFIELD(addr, 30, 32);
		break;
	}

	tgt = GET_BITFIELD(ilv, 4 * idx, 4 * idx + 3);

	/* If the target points to another node, find it and start over */
	if (SKX_ILV_REMOTE(tgt)) {
		if (remote) {
			edac_dbg(0, "Double remote!\n");
			return false;
		}
		remote = 1;
		list_for_each_entry(d, &skx_edac_list, list) {
			if (d->imc[0].src_id == SKX_ILV_TARGET(tgt))
				goto restart;
		}
		edac_dbg(0, "Can't find node %d\n", SKX_ILV_TARGET(tgt));
		return false;
	}

	if (SKX_SAD_MOD3(sad) == 0)
		lchan = SKX_ILV_TARGET(tgt);
	else {
		switch (SKX_SAD_MOD3MODE(sad)) {
		case 0:
			shift = 6;
			break;
		case 1:
			shift = 8;
			break;
		case 2:
			shift = 12;
			break;
		default:
			edac_dbg(0, "illegal mod3mode\n");
			return false;
		}
		switch (SKX_SAD_MOD3ASMOD2(sad)) {
		case 0:
			lchan = (addr >> shift) % 3;
			break;
		case 1:
			lchan = (addr >> shift) % 2;
			break;
		case 2:
			lchan = (addr >> shift) % 2;
			lchan = (lchan << 1) | !lchan;
			break;
		case 3:
			lchan = ((addr >> shift) % 2) << 1;
			break;
		}
		lchan = (lchan << 1) | (SKX_ILV_TARGET(tgt) & 1);
	}

	res->dev = d;
	res->socket = d->imc[0].src_id;
	res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2);
	res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19);

	edac_dbg(2, "%llx: socket=%d imc=%d channel=%d\n",
		 res->addr, res->socket, res->imc, res->channel);
	return true;
}

#define SKX_MAX_TAD 8

#define SKX_GET_TADBASE(d, mc, i, reg) \
	pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), &reg)
#define SKX_GET_TADWAYNESS(d, mc, i, reg) \
	pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), &reg)
#define SKX_GET_TADCHNILVOFFSET(d, mc, ch, i, reg) \
	pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), &reg)

#define SKX_TAD_BASE(b)		((u64)GET_BITFIELD((b), 12, 31) << 26)
#define SKX_TAD_SKT_GRAN(b)	GET_BITFIELD((b), 4, 5)
#define SKX_TAD_CHN_GRAN(b)	GET_BITFIELD((b), 6, 7)
#define SKX_TAD_LIMIT(b)	(((u64)GET_BITFIELD((b), 12, 31) << 26) | MASK26)
#define SKX_TAD_OFFSET(b)	((u64)GET_BITFIELD((b), 4, 23) << 26)
#define SKX_TAD_SKTWAYS(b)	(1 << GET_BITFIELD((b), 10, 11))
#define SKX_TAD_CHNWAYS(b)	(GET_BITFIELD((b), 8, 9) + 1)

/* Which address bit is used for socket and channel interleaving */
static int skx_granularity[] = { 6, 8, 12, 30 };
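
/*
 * Illustrative mapping (added for clarity): granularity codes 0-3 select
 * interleaving at bit 6, 8, 12 or 30, i.e. on 64 B, 256 B, 4 KiB or
 * 1 GiB boundaries.
 */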

static u64 skx_do_interleave(u64 addr, int shift, int ways, u64 lowbits)
{
	addr >>= shift;
	addr /= ways;
	addr <<= shift;

	return addr | (lowbits & ((1ull << shift) - 1));
}
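
/*
 * Worked example (not in the original source): with shift = 8 and
 * ways = 3, an addr of 0x12345 becomes (0x123 / 3) << 8 = 0x6100, and
 * the low 8 bits are refilled from lowbits, undoing a 3-way interleave
 * at bit 8.
 */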

static bool skx_tad_decode(struct decoded_addr *res)
{
	int i;
	u32 base, wayness, chnilvoffset;
	int skt_interleave_bit, chn_interleave_bit;
	u64 channel_addr;

	for (i = 0; i < SKX_MAX_TAD; i++) {
		SKX_GET_TADBASE(res->dev, res->imc, i, base);
		SKX_GET_TADWAYNESS(res->dev, res->imc, i, wayness);
		if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness))
			goto tad_found;
	}
	edac_dbg(0, "No TAD entry for %llx\n", res->addr);
	return false;

tad_found:
	res->sktways = SKX_TAD_SKTWAYS(wayness);
	res->chanways = SKX_TAD_CHNWAYS(wayness);
	skt_interleave_bit = skx_granularity[SKX_TAD_SKT_GRAN(base)];
	chn_interleave_bit = skx_granularity[SKX_TAD_CHN_GRAN(base)];

	SKX_GET_TADCHNILVOFFSET(res->dev, res->imc, res->channel, i, chnilvoffset);
	channel_addr = res->addr - SKX_TAD_OFFSET(chnilvoffset);

	if (res->chanways == 3 && skt_interleave_bit > chn_interleave_bit) {
		/* Must handle channel first, then socket */
		channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
						 res->chanways, channel_addr);
		channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
						 res->sktways, channel_addr);
	} else {
		/* Handle socket then channel. Preserve low bits from original address */
		channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
						 res->sktways, res->addr);
		channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
						 res->chanways, res->addr);
	}

	res->chan_addr = channel_addr;

	edac_dbg(2, "%llx: chan_addr=%llx sktways=%d chanways=%d\n",
		 res->addr, res->chan_addr, res->sktways, res->chanways);
	return true;
}

#define SKX_MAX_RIR 4

#define SKX_GET_RIRWAYNESS(d, mc, ch, i, reg) \
	pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \
			      0x108 + 4 * (i), &reg)
#define SKX_GET_RIRILV(d, mc, ch, idx, i, reg) \
	pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \
			      0x120 + 16 * idx + 4 * (i), &reg)

#define SKX_RIR_VALID(b)	GET_BITFIELD((b), 31, 31)
#define SKX_RIR_LIMIT(b)	(((u64)GET_BITFIELD((b), 1, 11) << 29) | MASK29)
#define SKX_RIR_WAYS(b)		(1 << GET_BITFIELD((b), 28, 29))
#define SKX_RIR_CHAN_RANK(b)	GET_BITFIELD((b), 16, 19)
#define SKX_RIR_OFFSET(b)	((u64)(GET_BITFIELD((b), 2, 15) << 26))

static bool skx_rir_decode(struct decoded_addr *res)
{
	int i, idx, chan_rank;
	int shift;
	u32 rirway, rirlv;
	u64 rank_addr, prev_limit = 0, limit;

	if (res->dev->imc[res->imc].chan[res->channel].dimms[0].close_pg)
		shift = 6;
	else
		shift = 13;

	for (i = 0; i < SKX_MAX_RIR; i++) {
		SKX_GET_RIRWAYNESS(res->dev, res->imc, res->channel, i, rirway);
		limit = SKX_RIR_LIMIT(rirway);
		if (SKX_RIR_VALID(rirway)) {
			if (prev_limit <= res->chan_addr &&
			    res->chan_addr <= limit)
				goto rir_found;
		}
		prev_limit = limit;
	}
	edac_dbg(0, "No RIR entry for %llx\n", res->addr);
	return false;

rir_found:
	rank_addr = res->chan_addr >> shift;
	rank_addr /= SKX_RIR_WAYS(rirway);
	rank_addr <<= shift;
	rank_addr |= res->chan_addr & GENMASK_ULL(shift - 1, 0);

	res->rank_address = rank_addr;
	idx = (res->chan_addr >> shift) % SKX_RIR_WAYS(rirway);

	SKX_GET_RIRILV(res->dev, res->imc, res->channel, idx, i, rirlv);
	res->rank_address = rank_addr - SKX_RIR_OFFSET(rirlv);
	chan_rank = SKX_RIR_CHAN_RANK(rirlv);
	res->channel_rank = chan_rank;
	res->dimm = chan_rank / 4;
	res->rank = chan_rank % 4;

	edac_dbg(2, "%llx: dimm=%d rank=%d chan_rank=%d rank_addr=%llx\n",
		 res->addr, res->dimm, res->rank,
		 res->channel_rank, res->rank_address);
	return true;
}

static u8 skx_close_row[] = {
	15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
};
static u8 skx_close_column[] = {
	3, 4, 5, 14, 19, 23, 24, 25, 26, 27
};
static u8 skx_open_row[] = {
	14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
};
static u8 skx_open_column[] = {
	3, 4, 5, 6, 7, 8, 9, 10, 11, 12
};
static u8 skx_open_fine_column[] = {
	3, 4, 5, 7, 8, 9, 10, 11, 12, 13
};

static int skx_bits(u64 addr, int nbits, u8 *bits)
{
	int i, res = 0;

	for (i = 0; i < nbits; i++)
		res |= ((addr >> bits[i]) & 1) << i;
	return res;
}
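
/*
 * Illustrative example (not in the original source): skx_bits() gathers
 * the listed physical address bits LSB-first. With bits = {3, 4, 5} and
 * addr = 0x28 (bits 3 and 5 set), the result is 0b101 = 5.
 */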

static int skx_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
{
	int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);

	if (do_xor)
		ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);

	return ret;
}
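
/*
 * Note (illustrative, added for clarity): in close-page mode below, the
 * bank address pair (bit 9, bit 8) is XORed with (bit 28, bit 22) of the
 * rank address when bank XOR is enabled (see skx_mad_decode()).
 */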

static bool skx_mad_decode(struct decoded_addr *r)
{
	struct skx_dimm *dimm = &r->dev->imc[r->imc].chan[r->channel].dimms[r->dimm];
	int bg0 = dimm->fine_grain_bank ? 6 : 13;

	if (dimm->close_pg) {
		r->row = skx_bits(r->rank_address, dimm->rowbits, skx_close_row);
		r->column = skx_bits(r->rank_address, dimm->colbits, skx_close_column);
		r->column |= 0x400; /* C10 is autoprecharge, always set */
		r->bank_address = skx_bank_bits(r->rank_address, 8, 9, dimm->bank_xor_enable, 22, 28);
		r->bank_group = skx_bank_bits(r->rank_address, 6, 7, dimm->bank_xor_enable, 20, 21);
	} else {
		r->row = skx_bits(r->rank_address, dimm->rowbits, skx_open_row);
		if (dimm->fine_grain_bank)
			r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_fine_column);
		else
			r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_column);
		r->bank_address = skx_bank_bits(r->rank_address, 18, 19, dimm->bank_xor_enable, 22, 23);
		r->bank_group = skx_bank_bits(r->rank_address, bg0, 17, dimm->bank_xor_enable, 20, 21);
	}
	r->row &= (1u << dimm->rowbits) - 1;

	edac_dbg(2, "%llx: row=%x col=%x bank_addr=%d bank_group=%d\n",
		 r->addr, r->row, r->column, r->bank_address,
		 r->bank_group);
	return true;
}

static bool skx_decode(struct decoded_addr *res)
{
	return skx_sad_decode(res) && skx_tad_decode(res) &&
		skx_rir_decode(res) && skx_mad_decode(res);
}

#ifdef CONFIG_EDAC_DEBUG
/*
 * Debug feature. Make /sys/kernel/debug/skx_edac_test/addr.
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
static struct dentry *skx_test;
static u64 skx_fake_addr;

static int debugfs_u64_set(void *data, u64 val)
{
	struct decoded_addr res;

	res.addr = val;
	skx_decode(&res);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static struct dentry *mydebugfs_create(const char *name, umode_t mode,
				       struct dentry *parent, u64 *value)
{
	return debugfs_create_file(name, mode, parent, value, &fops_u64_wo);
}
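
/*
 * Example usage (illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *   echo 0x12345678 > /sys/kernel/debug/skx_edac_test/addr
 * The decode results are reported through edac_dbg().
 */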

static void setup_skx_debug(void)
{
	skx_test = debugfs_create_dir("skx_edac_test", NULL);
	mydebugfs_create("addr", S_IWUSR, skx_test, &skx_fake_addr);
}

static void teardown_skx_debug(void)
{
	debugfs_remove_recursive(skx_test);
}
#else
static void setup_skx_debug(void)
{
}

static void teardown_skx_debug(void)
{
}
#endif /* CONFIG_EDAC_DEBUG */

static bool skx_adxl_decode(struct decoded_addr *res)
{
	int i, len = 0;

	if (res->addr >= skx_tohm || (res->addr >= skx_tolm &&
				      res->addr < BIT_ULL(32))) {
		edac_dbg(0, "Address 0x%llx out of range\n", res->addr);
		return false;
	}

	if (adxl_decode(res->addr, adxl_values)) {
		edac_dbg(0, "Failed to decode 0x%llx\n", res->addr);
		return false;
	}

	res->socket = (int)adxl_values[component_indices[INDEX_SOCKET]];
	res->imc = (int)adxl_values[component_indices[INDEX_MEMCTRL]];
	res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]];
	res->dimm = (int)adxl_values[component_indices[INDEX_DIMM]];

	for (i = 0; i < adxl_component_count; i++) {
		if (adxl_values[i] == ~0x0ull)
			continue;

		len += snprintf(adxl_msg + len, MSG_SIZE - len, " %s:0x%llx",
				adxl_component_names[i], adxl_values[i]);
		if (MSG_SIZE - len <= 0)
			break;
	}

	return true;
}

static void skx_mce_output_error(struct mem_ctl_info *mci,
				 const struct mce *m,
				 struct decoded_addr *res)
{
	enum hw_event_mc_err_type tp_event;
	char *type, *optype;
	bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
	bool overflow = GET_BITFIELD(m->status, 62, 62);
	bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
	bool recoverable;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);

	recoverable = GET_BITFIELD(m->status, 56, 56);

	if (uncorrected_error) {
		core_err_cnt = 1;
		if (ripv) {
			type = "FATAL";
			tp_event = HW_EVENT_ERR_FATAL;
		} else {
			type = "NON_FATAL";
			tp_event = HW_EVENT_ERR_UNCORRECTED;
		}
	} else {
		type = "CORRECTED";
		tp_event = HW_EVENT_ERR_CORRECTED;
	}

	/*
	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic.
	 */
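	/*
	 * Illustrative example (not in the original source): an errcode of
	 * 0x0091 satisfies (0x0091 & 0xef80) == 0x0080, so it is treated as
	 * a memory error; its mmm field (bits 4-6) is 1, which the switch
	 * below reports as a "memory read error".
	 */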
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}
	if (adxl_component_count) {
		snprintf(skx_msg, MSG_SIZE, "%s%s err_code:%04x:%04x %s",
			 overflow ? " OVERFLOW" : "",
			 (uncorrected_error && recoverable) ? " recoverable" : "",
			 mscod, errcode, adxl_msg);
	} else {
		snprintf(skx_msg, MSG_SIZE,
			 "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x",
			 overflow ? " OVERFLOW" : "",
			 (uncorrected_error && recoverable) ? " recoverable" : "",
			 mscod, errcode,
			 res->socket, res->imc, res->rank,
			 res->bank_group, res->bank_address, res->row, res->column);
	}

	edac_dbg(0, "%s\n", skx_msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt,
			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
			     res->channel, res->dimm, -1,
			     optype, skx_msg);
}

static struct mem_ctl_info *get_mci(int src_id, int lmc)
{
	struct skx_dev *d;

	if (lmc > NUM_IMC - 1) {
		skx_printk(KERN_ERR, "Bad lmc %d\n", lmc);
		return NULL;
	}

	list_for_each_entry(d, &skx_edac_list, list) {
		if (d->imc[0].src_id == src_id)
			return d->imc[lmc].mci;
	}

	skx_printk(KERN_ERR, "No mci for src_id %d lmc %d\n", src_id, lmc);

	return NULL;
}

static int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
			       void *data)
{
	struct mce *mce = (struct mce *)data;
	struct decoded_addr res;
	struct mem_ctl_info *mci;
	char *type;

	if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
		return NOTIFY_DONE;

	/* Ignore unless this is memory related with an address */
	if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV))
		return NOTIFY_DONE;

	memset(&res, 0, sizeof(res));
	res.addr = mce->addr;

	if (adxl_component_count) {
		if (!skx_adxl_decode(&res))
			return NOTIFY_DONE;

		mci = get_mci(res.socket, res.imc);
	} else {
		if (!skx_decode(&res))
			return NOTIFY_DONE;

		mci = res.dev->imc[res.imc].mci;
	}

	if (!mci)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");

	skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
		      "Bank %d: %016Lx\n", mce->extcpu, type,
		      mce->mcgstatus, mce->bank, mce->status);
	skx_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
	skx_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
	skx_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);

	skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
		      "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
		      mce->time, mce->socketid, mce->apicid);

	skx_mce_output_error(mci, mce, &res);

	return NOTIFY_DONE;
}

static struct notifier_block skx_mce_dec = {
	.notifier_call	= skx_mce_check_error,
	.priority	= MCE_PRIO_EDAC,
};

static void skx_remove(void)
{
	int i, j;
	struct skx_dev *d, *tmp;

	edac_dbg(0, "\n");

	list_for_each_entry_safe(d, tmp, &skx_edac_list, list) {
		list_del(&d->list);
		for (i = 0; i < NUM_IMC; i++) {
			skx_unregister_mci(&d->imc[i]);
			for (j = 0; j < NUM_CHANNELS; j++)
				pci_dev_put(d->imc[i].chan[j].cdev);
		}
		pci_dev_put(d->util_all);
		pci_dev_put(d->sad_all);

		kfree(d);
	}
}

static void __init skx_adxl_get(void)
{
	const char * const *names;
	int i, j;

	names = adxl_get_component_names();
	if (!names) {
		skx_printk(KERN_NOTICE, "No firmware support for address translation.");
		skx_printk(KERN_CONT, " Only decoding DDR4 address!\n");
		return;
	}

	for (i = 0; i < INDEX_MAX; i++) {
		for (j = 0; names[j]; j++) {
			if (!strcmp(component_names[i], names[j])) {
				component_indices[i] = j;
				break;
			}
		}

		if (!names[j])
			goto err;
	}

	adxl_component_names = names;
	while (*names++)
		adxl_component_count++;

	adxl_values = kcalloc(adxl_component_count, sizeof(*adxl_values),
			      GFP_KERNEL);
	if (!adxl_values) {
		adxl_component_count = 0;
		return;
	}

	adxl_msg = kzalloc(MSG_SIZE, GFP_KERNEL);
	if (!adxl_msg) {
		adxl_component_count = 0;
		kfree(adxl_values);
	}

	return;
err:
	skx_printk(KERN_ERR, "'%s' is not matched from DSM parameters: ",
		   component_names[i]);
	for (j = 0; names[j]; j++)
		skx_printk(KERN_CONT, "%s ", names[j]);
	skx_printk(KERN_CONT, "\n");
}

static void __exit skx_adxl_put(void)
{
	kfree(adxl_values);
	kfree(adxl_msg);
}

/*
 * skx_init:
 *	make sure we are running on the correct cpu model
 *	search for all the devices we need
 *	check which DIMMs are present.
 */
static int __init skx_init(void)
{
	const struct x86_cpu_id *id;
	const struct munit *m;
	const char *owner;
	int rc = 0, i;
	u8 mc = 0, src_id, node_id;
	struct skx_dev *d;

	edac_dbg(2, "\n");

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	id = x86_match_cpu(skx_cpuids);
	if (!id)
		return -ENODEV;

	rc = skx_get_hi_lo();
	if (rc)
		return rc;

	rc = get_all_bus_mappings();
	if (rc < 0)
		goto fail;
	if (rc == 0) {
		edac_dbg(2, "No memory controllers found\n");
		return -ENODEV;
	}

	for (m = skx_all_munits; m->did; m++) {
		rc = get_all_munits(m);
		if (rc < 0)
			goto fail;
		if (rc != m->per_socket * skx_num_sockets) {
			edac_dbg(2, "Expected %d, got %d of %x\n",
				 m->per_socket * skx_num_sockets, rc, m->did);
			rc = -ENODEV;
			goto fail;
		}
	}

	list_for_each_entry(d, &skx_edac_list, list) {
		src_id = get_src_id(d);
		node_id = skx_get_node_id(d);
		edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id);
		for (i = 0; i < NUM_IMC; i++) {
			d->imc[i].mc = mc++;
			d->imc[i].lmc = i;
			d->imc[i].src_id = src_id;
			d->imc[i].node_id = node_id;
			rc = skx_register_mci(&d->imc[i]);
			if (rc < 0)
				goto fail;
		}
	}

	skx_msg = kzalloc(MSG_SIZE, GFP_KERNEL);
	if (!skx_msg) {
		rc = -ENOMEM;
		goto fail;
	}

	if (nvdimm_count)
		skx_adxl_get();

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	setup_skx_debug();

	mce_register_decode_chain(&skx_mce_dec);

	return 0;
fail:
	skx_remove();
	return rc;
}

static void __exit skx_exit(void)
{
	edac_dbg(2, "\n");
	mce_unregister_decode_chain(&skx_mce_dec);
	skx_remove();
	if (nvdimm_count)
		skx_adxl_put();
	kfree(skx_msg);
	teardown_skx_debug();
}

module_init(skx_init);
module_exit(skx_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel Skylake server processors");