// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Intel(R) 10nm server memory controller.
 * Copyright (c) 2019, Intel Corporation.
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/mce.h>
#include "edac_module.h"
#include "skx_common.h"
#define I10NM_REVISION	"v0.0.6"
#define EDAC_MOD_STR	"i10nm_edac"

/* Debug macros */
#define i10nm_printk(level, fmt, arg...)	\
	edac_printk(level, "i10nm", fmt, ##arg)
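/*
 * Register-access helpers: the pci_read_config_dword() macros below pull
 * BARs and capability registers from the per-socket "uracu", "pcu_cr3" and
 * "sad_all" PCI devices, while the readl()/readq()/writel() macros access
 * memory-controller CSRs relative to the ioremap()ed window in
 * skx_imc::mbase. For example, on a non-HBM, non-GNR controller,
 * I10NM_GET_DIMMMTR(m, 1, 0) expands to
 * readl((m)->mbase + 0x2080c + 1 * (m)->chan_mmio_sz), i.e. the DIMMMTR
 * register of the first DIMM on channel 1.
 */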
#define I10NM_GET_SCK_BAR(d, reg)	\
	pci_read_config_dword((d)->uracu, 0xd0, &(reg))
#define I10NM_GET_IMC_BAR(d, i, reg)	\
	pci_read_config_dword((d)->uracu,	\
	(res_cfg->type == GNR ? 0xd4 : 0xd8) + (i) * 4, &(reg))
#define I10NM_GET_SAD(d, offset, i, reg)	\
	pci_read_config_dword((d)->sad_all, (offset) + (i) *	\
	(res_cfg->type == GNR ? 12 : 8), &(reg))
#define I10NM_GET_HBM_IMC_BAR(d, reg)	\
	pci_read_config_dword((d)->uracu, 0xd4, &(reg))
#define I10NM_GET_CAPID3_CFG(d, reg)	\
	pci_read_config_dword((d)->pcu_cr3,	\
	res_cfg->type == GNR ? 0x290 : 0x90, &(reg))
#define I10NM_GET_CAPID5_CFG(d, reg)	\
	pci_read_config_dword((d)->pcu_cr3,	\
	res_cfg->type == GNR ? 0x298 : 0x98, &(reg))
#define I10NM_GET_DIMMMTR(m, i, j)	\
	readl((m)->mbase + ((m)->hbm_mc ? 0x80c :	\
	(res_cfg->type == GNR ? 0xc0c : 0x2080c)) +	\
	(i) * (m)->chan_mmio_sz + (j) * 4)
#define I10NM_GET_MCDDRTCFG(m, i)	\
	readl((m)->mbase + ((m)->hbm_mc ? 0x970 : 0x20970) +	\
	(i) * (m)->chan_mmio_sz)
#define I10NM_GET_MCMTR(m, i)	\
	readl((m)->mbase + ((m)->hbm_mc ? 0xef8 :	\
	(res_cfg->type == GNR ? 0xaf8 : 0x20ef8)) +	\
	(i) * (m)->chan_mmio_sz)
#define I10NM_GET_REG32(m, i, offset)	\
	readl((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
#define I10NM_GET_REG64(m, i, offset)	\
	readq((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
#define I10NM_SET_REG32(m, i, offset, v)	\
	writel(v, (m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
#define I10NM_GET_SCK_MMIO_BASE(reg)	(GET_BITFIELD(reg, 0, 28) << 23)
#define I10NM_GET_IMC_MMIO_OFFSET(reg)	(GET_BITFIELD(reg, 0, 10) << 12)
#define I10NM_GET_IMC_MMIO_SIZE(reg)	((GET_BITFIELD(reg, 13, 23) - \
					  GET_BITFIELD(reg, 0, 10) + 1) << 12)
#define I10NM_GET_HBM_IMC_MMIO_OFFSET(reg)	\
	((GET_BITFIELD(reg, 0, 10) << 12) + 0x140000)

#define I10NM_GNR_IMC_MMIO_OFFSET	0x24c000
#define I10NM_GNR_IMC_MMIO_SIZE		0x4000
#define I10NM_HBM_IMC_MMIO_SIZE		0x9000
#define I10NM_DDR_IMC_CH_CNT(reg)	GET_BITFIELD(reg, 21, 24)
#define I10NM_IS_HBM_PRESENT(reg)	GET_BITFIELD(reg, 27, 30)
#define I10NM_IS_HBM_IMC(reg)		GET_BITFIELD(reg, 29, 29)

#define I10NM_MAX_SAD			16
#define I10NM_SAD_ENABLE(reg)		GET_BITFIELD(reg, 0, 0)
#define I10NM_SAD_NM_CACHEABLE(reg)	GET_BITFIELD(reg, 5, 5)

#define RETRY_RD_ERR_LOG_UC		BIT(1)
#define RETRY_RD_ERR_LOG_NOOVER		BIT(14)
#define RETRY_RD_ERR_LOG_EN		BIT(15)
#define RETRY_RD_ERR_LOG_NOOVER_UC	(BIT(14) | BIT(1))
#define RETRY_RD_ERR_LOG_OVER_UC_V	(BIT(2) | BIT(1) | BIT(0))
static struct list_head *i10nm_edac_list;

static struct res_config *res_cfg;
static int retry_rd_err_log;
static int decoding_via_mca;
static bool mem_cfg_2lm;

static u32 offsets_scrub_icx[]  = {0x22c60, 0x22c54, 0x22c5c, 0x22c58, 0x22c28, 0x20ed8};
static u32 offsets_scrub_spr[]  = {0x22c60, 0x22c54, 0x22f08, 0x22c58, 0x22c28, 0x20ed8};
static u32 offsets_scrub_spr_hbm0[]  = {0x2860, 0x2854, 0x2b08, 0x2858, 0x2828, 0x0ed8};
static u32 offsets_scrub_spr_hbm1[]  = {0x2c60, 0x2c54, 0x2f08, 0x2c58, 0x2c28, 0x0fa8};
static u32 offsets_demand_icx[] = {0x22e54, 0x22e60, 0x22e64, 0x22e58, 0x22e5c, 0x20ee0};
static u32 offsets_demand_spr[] = {0x22e54, 0x22e60, 0x22f10, 0x22e58, 0x22e5c, 0x20ee0};
static u32 offsets_demand2_spr[] = {0x22c70, 0x22d80, 0x22f18, 0x22d58, 0x22c64, 0x20f10};
static u32 offsets_demand_spr_hbm0[] = {0x2a54, 0x2a60, 0x2b10, 0x2a58, 0x2a5c, 0x0ee0};
static u32 offsets_demand_spr_hbm1[] = {0x2e54, 0x2e60, 0x2f10, 0x2e58, 0x2e5c, 0x0fb0};
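/*
 * Layout of the offsets_* tables above (inferred from their use in
 * __enable_retry_rd_err_log() and show_retry_rd_err_log() below; the
 * hardware register names are not spelled out in this file): entry 0 is
 * the retry_rd_err_log control/status register carrying the EN, NOOVER and
 * UC bits, entries 1-4 are further 32-bit log registers, and entry 5 is
 * read as a 64-bit register. The *_hbm0/*_hbm1 tables address the two HBM
 * pseudo channels.
 */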
static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable,
				      u32 *offsets_scrub, u32 *offsets_demand,
				      u32 *offsets_demand2)
{
	u32 s, d, d2;

	s = I10NM_GET_REG32(imc, chan, offsets_scrub[0]);
	d = I10NM_GET_REG32(imc, chan, offsets_demand[0]);
	if (offsets_demand2)
		d2 = I10NM_GET_REG32(imc, chan, offsets_demand2[0]);

	if (enable) {
		/* Save default configurations */
		imc->chan[chan].retry_rd_err_log_s = s;
		imc->chan[chan].retry_rd_err_log_d = d;
		if (offsets_demand2)
			imc->chan[chan].retry_rd_err_log_d2 = d2;

		s &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
		s |=  RETRY_RD_ERR_LOG_EN;
		d &= ~RETRY_RD_ERR_LOG_NOOVER_UC;
		d |=  RETRY_RD_ERR_LOG_EN;

		if (offsets_demand2) {
			d2 &= ~RETRY_RD_ERR_LOG_UC;
			d2 |=  RETRY_RD_ERR_LOG_NOOVER;
			d2 |=  RETRY_RD_ERR_LOG_EN;
		}
	} else {
		/* Restore default configurations */
		if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_UC)
			s |=  RETRY_RD_ERR_LOG_UC;
		if (imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_NOOVER)
			s |=  RETRY_RD_ERR_LOG_NOOVER;
		if (!(imc->chan[chan].retry_rd_err_log_s & RETRY_RD_ERR_LOG_EN))
			s &= ~RETRY_RD_ERR_LOG_EN;
		if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_UC)
			d |=  RETRY_RD_ERR_LOG_UC;
		if (imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_NOOVER)
			d |=  RETRY_RD_ERR_LOG_NOOVER;
		if (!(imc->chan[chan].retry_rd_err_log_d & RETRY_RD_ERR_LOG_EN))
			d &= ~RETRY_RD_ERR_LOG_EN;

		if (offsets_demand2) {
			if (imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_UC)
				d2 |=  RETRY_RD_ERR_LOG_UC;
			if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_NOOVER))
				d2 &= ~RETRY_RD_ERR_LOG_NOOVER;
			if (!(imc->chan[chan].retry_rd_err_log_d2 & RETRY_RD_ERR_LOG_EN))
				d2 &= ~RETRY_RD_ERR_LOG_EN;
		}
	}

	I10NM_SET_REG32(imc, chan, offsets_scrub[0], s);
	I10NM_SET_REG32(imc, chan, offsets_demand[0], d);
	if (offsets_demand2)
		I10NM_SET_REG32(imc, chan, offsets_demand2[0], d2);
}
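/*
 * Walk all sockets and flip retry_rd_err_log reporting on every channel of
 * every present memory controller: first the DDR controllers, then the HBM
 * controllers, which have a separate register set per pseudo channel and no
 * demand2 registers.
 */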
static void enable_retry_rd_err_log(bool enable)
{
	int i, j, imc_num, chan_num;
	struct skx_imc *imc;
	struct skx_dev *d;

	list_for_each_entry(d, i10nm_edac_list, list) {
		imc_num  = res_cfg->ddr_imc_num;
		chan_num = res_cfg->ddr_chan_num;

		for (i = 0; i < imc_num; i++) {
			imc = &d->imc[i];
			if (!imc->mbase)
				continue;

			for (j = 0; j < chan_num; j++)
				__enable_retry_rd_err_log(imc, j, enable,
							  res_cfg->offsets_scrub,
							  res_cfg->offsets_demand,
							  res_cfg->offsets_demand2);
		}

		imc_num += res_cfg->hbm_imc_num;
		chan_num = res_cfg->hbm_chan_num;

		for (; i < imc_num; i++) {
			imc = &d->imc[i];
			if (!imc->mbase || !imc->hbm_mc)
				continue;

			for (j = 0; j < chan_num; j++) {
				__enable_retry_rd_err_log(imc, j, enable,
							  res_cfg->offsets_scrub_hbm0,
							  res_cfg->offsets_demand_hbm0,
							  NULL);
				__enable_retry_rd_err_log(imc, j, enable,
							  res_cfg->offsets_scrub_hbm1,
							  res_cfg->offsets_demand_hbm1,
							  NULL);
			}
		}
	}
}
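/*
 * Append the retry_rd_err_log registers and the per-rank correctable error
 * counters ("correrrcnt") of the channel decoded in @res to @msg. SPR adds
 * a 64-bit third log register and, for demand reads, a second "demand2"
 * register set; HBM controllers keep distinct register banks per pseudo
 * channel. In retry_rd_err_log=2 mode the overflow/UC/valid status bits
 * are cleared after reporting.
 */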
static void show_retry_rd_err_log(struct decoded_addr *res, char *msg,
				  int len, bool scrub_err)
{
	struct skx_imc *imc = &res->dev->imc[res->imc];
	u32 log0, log1, log2, log3, log4;
	u32 corr0, corr1, corr2, corr3;
	u32 lxg0, lxg1, lxg3, lxg4;
	u32 *xffsets = NULL;
	u64 log2a, log5;
	u64 lxg2a, lxg5;
	u32 *offsets;
	int n, pch;

	if (!imc->mbase)
		return;

	if (imc->hbm_mc) {
		pch = res->cs & 1;

		if (pch)
			offsets = scrub_err ? res_cfg->offsets_scrub_hbm1 :
					      res_cfg->offsets_demand_hbm1;
		else
			offsets = scrub_err ? res_cfg->offsets_scrub_hbm0 :
					      res_cfg->offsets_demand_hbm0;
	} else {
		if (scrub_err) {
			offsets = res_cfg->offsets_scrub;
		} else {
			offsets = res_cfg->offsets_demand;
			xffsets = res_cfg->offsets_demand2;
		}
	}

	log0 = I10NM_GET_REG32(imc, res->channel, offsets[0]);
	log1 = I10NM_GET_REG32(imc, res->channel, offsets[1]);
	log3 = I10NM_GET_REG32(imc, res->channel, offsets[3]);
	log4 = I10NM_GET_REG32(imc, res->channel, offsets[4]);
	log5 = I10NM_GET_REG64(imc, res->channel, offsets[5]);

	if (xffsets) {
		lxg0 = I10NM_GET_REG32(imc, res->channel, xffsets[0]);
		lxg1 = I10NM_GET_REG32(imc, res->channel, xffsets[1]);
		lxg3 = I10NM_GET_REG32(imc, res->channel, xffsets[3]);
		lxg4 = I10NM_GET_REG32(imc, res->channel, xffsets[4]);
		lxg5 = I10NM_GET_REG64(imc, res->channel, xffsets[5]);
	}

	if (res_cfg->type == SPR) {
		log2a = I10NM_GET_REG64(imc, res->channel, offsets[2]);
		n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.16llx %.8x %.8x %.16llx",
			     log0, log1, log2a, log3, log4, log5);

		if (xffsets) {
			lxg2a = I10NM_GET_REG64(imc, res->channel, xffsets[2]);
			n += snprintf(msg + n, len - n, " %.8x %.8x %.16llx %.8x %.8x %.16llx]",
				      lxg0, lxg1, lxg2a, lxg3, lxg4, lxg5);
		} else {
			n += snprintf(msg + n, len - n, "]");
		}
	} else {
		log2 = I10NM_GET_REG32(imc, res->channel, offsets[2]);
		n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x %.16llx]",
			     log0, log1, log2, log3, log4, log5);
	}

	if (imc->hbm_mc) {
		if (pch) {
			corr0 = I10NM_GET_REG32(imc, res->channel, 0x2c18);
			corr1 = I10NM_GET_REG32(imc, res->channel, 0x2c1c);
			corr2 = I10NM_GET_REG32(imc, res->channel, 0x2c20);
			corr3 = I10NM_GET_REG32(imc, res->channel, 0x2c24);
		} else {
			corr0 = I10NM_GET_REG32(imc, res->channel, 0x2818);
			corr1 = I10NM_GET_REG32(imc, res->channel, 0x281c);
			corr2 = I10NM_GET_REG32(imc, res->channel, 0x2820);
			corr3 = I10NM_GET_REG32(imc, res->channel, 0x2824);
		}
	} else {
		corr0 = I10NM_GET_REG32(imc, res->channel, 0x22c18);
		corr1 = I10NM_GET_REG32(imc, res->channel, 0x22c1c);
		corr2 = I10NM_GET_REG32(imc, res->channel, 0x22c20);
		corr3 = I10NM_GET_REG32(imc, res->channel, 0x22c24);
	}

	if (len - n > 0)
		snprintf(msg + n, len - n,
			 " correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]",
			 corr0 & 0xffff, corr0 >> 16,
			 corr1 & 0xffff, corr1 >> 16,
			 corr2 & 0xffff, corr2 >> 16,
			 corr3 & 0xffff, corr3 >> 16);

	/* Clear status bits */
	if (retry_rd_err_log == 2) {
		if (log0 & RETRY_RD_ERR_LOG_OVER_UC_V) {
			log0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
			I10NM_SET_REG32(imc, res->channel, offsets[0], log0);
		}

		if (xffsets && (lxg0 & RETRY_RD_ERR_LOG_OVER_UC_V)) {
			lxg0 &= ~RETRY_RD_ERR_LOG_OVER_UC_V;
			I10NM_SET_REG32(imc, res->channel, xffsets[0], lxg0);
		}
	}
}
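/*
 * Look up a PCI device by domain/bus/device/function and enable it. On
 * success the device is returned with a reference held; the caller is
 * responsible for the eventual pci_dev_put().
 */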
static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
					   unsigned int dev, unsigned int fun)
{
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(dom, bus, PCI_DEVFN(dev, fun));
	if (!pdev) {
		edac_dbg(2, "No device %02x:%02x.%x\n",
			 bus, dev, fun);
		return NULL;
	}

	if (unlikely(pci_enable_device(pdev) < 0)) {
		edac_dbg(2, "Failed to enable device %02x:%02x.%x\n",
			 bus, dev, fun);
		pci_dev_put(pdev);
		return NULL;
	}

	return pdev;
}
/**
 * i10nm_get_imc_num() - Get the number of present DDR memory controllers.
 *
 * @cfg : The pointer to the structure of EDAC resource configurations.
 *
 * For Granite Rapids CPUs, the number of present DDR memory controllers read
 * at runtime overwrites the value statically configured in @cfg->ddr_imc_num.
 * For other CPUs, the number of present DDR memory controllers is statically
 * configured in @cfg->ddr_imc_num.
 *
 * RETURNS : 0 on success, < 0 on failure.
 */
static int i10nm_get_imc_num(struct res_config *cfg)
{
	int n, imc_num, chan_num = 0;
	struct skx_dev *d;
	u32 reg;

	list_for_each_entry(d, i10nm_edac_list, list) {
		d->pcu_cr3 = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->pcu_cr3_bdf.bus],
						 res_cfg->pcu_cr3_bdf.dev,
						 res_cfg->pcu_cr3_bdf.fun);
		if (!d->pcu_cr3)
			continue;

		if (I10NM_GET_CAPID5_CFG(d, reg))
			continue;

		n = I10NM_DDR_IMC_CH_CNT(reg);

		if (!chan_num) {
			chan_num = n;
			edac_dbg(2, "Get DDR CH number: %d\n", chan_num);
		} else if (chan_num != n) {
			i10nm_printk(KERN_NOTICE, "Get DDR CH numbers: %d, %d\n", chan_num, n);
		}
	}

	switch (cfg->type) {
	case GNR:
		/*
		 * One channel per DDR memory controller for Granite Rapids CPUs.
		 */
		imc_num = chan_num;

		if (!imc_num) {
			i10nm_printk(KERN_ERR, "Invalid DDR MC number\n");
			return -ENODEV;
		}

		if (imc_num > I10NM_NUM_DDR_IMC) {
			i10nm_printk(KERN_ERR, "Need to make I10NM_NUM_DDR_IMC >= %d\n", imc_num);
			return -EINVAL;
		}

		if (cfg->ddr_imc_num != imc_num) {
			/* Store the number of present DDR memory controllers. */
			cfg->ddr_imc_num = imc_num;
			edac_dbg(2, "Set DDR MC number: %d", imc_num);
		}

		return 0;
	default:
		/*
		 * For other CPUs, the number of present DDR memory controllers
		 * is statically pre-configured in cfg->ddr_imc_num.
		 */
		return 0;
	}
}
static bool i10nm_check_2lm(struct res_config *cfg)
{
	struct skx_dev *d;
	u32 reg;
	int i;

	list_for_each_entry(d, i10nm_edac_list, list) {
		d->sad_all = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->sad_all_bdf.bus],
						 res_cfg->sad_all_bdf.dev,
						 res_cfg->sad_all_bdf.fun);
		if (!d->sad_all)
			continue;

		for (i = 0; i < I10NM_MAX_SAD; i++) {
			I10NM_GET_SAD(d, cfg->sad_all_offset, i, reg);
			if (I10NM_SAD_ENABLE(reg) && I10NM_SAD_NM_CACHEABLE(reg)) {
				edac_dbg(2, "2-level memory configuration.\n");
				return true;
			}
		}
	}

	return false;
}
/*
 * Check whether the error comes from DDRT by the ICX/Tremont/SPR model specific error code.
 * Refer to SDM vol3B 17.11.3/17.13.2 Intel IMC MC error codes for IA32_MCi_STATUS.
 */
static bool i10nm_mscod_is_ddrt(u32 mscod)
{
	switch (res_cfg->type) {
	case I10NM:
		switch (mscod) {
		case 0x0106: case 0x0107:
		case 0x0800: case 0x0804:
		case 0x0806 ... 0x0808:
		case 0x080a ... 0x080e:
		case 0x0810: case 0x0811:
		case 0x0816: case 0x081e:
			return true;
		}

		break;
	case SPR:
		switch (mscod) {
		case 0x0800: case 0x0804:
		case 0x0806 ... 0x0808:
		case 0x080a ... 0x080e:
		case 0x0810: case 0x0811:
		case 0x0816: case 0x081e:
			return true;
		}

		break;
	default:
		return false;
	}

	return false;
}
static bool i10nm_mc_decode_available(struct mce *mce)
{
#define ICX_IMCx_CHy	0x06666000
	u8 bank;

	if (!decoding_via_mca || mem_cfg_2lm)
		return false;

	if ((mce->status & (MCI_STATUS_MISCV | MCI_STATUS_ADDRV))
	    != (MCI_STATUS_MISCV | MCI_STATUS_ADDRV))
		return false;

	bank = mce->bank;

	switch (res_cfg->type) {
	case I10NM:
		/* Check whether the bank is one of {13,14,17,18,21,22,25,26} */
		if (!(ICX_IMCx_CHy & (1 << bank)))
			return false;
		break;
	case SPR:
		if (bank < 13 || bank > 20)
			return false;
		break;
	default:
		return false;
	}

	/* DDRT errors can't be decoded from MCA bank registers */
	if (MCI_MISC_ECC_MODE(mce->misc) == MCI_MISC_ECC_DDRT)
		return false;

	if (i10nm_mscod_is_ddrt(MCI_STATUS_MSCOD(mce->status)))
		return false;

	return true;
}
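/*
 * Decode a DDR memory error from the MCA bank registers alone: the socket
 * comes from mce->socketid, the memory controller and channel from the
 * bank number, and column/row/bank/rank/DIMM from the IA32_MCi_MISC bit
 * fields. The bit layouts below correspond to the ICX and SPR integrated
 * memory controller banks that i10nm_mc_decode_available() filtered for.
 */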
static bool i10nm_mc_decode(struct decoded_addr *res)
{
	struct mce *m = res->mce;
	struct skx_dev *d;
	u8 bank;

	if (!i10nm_mc_decode_available(m))
		return false;

	list_for_each_entry(d, i10nm_edac_list, list) {
		if (d->imc[0].src_id == m->socketid) {
			res->socket = m->socketid;
			res->dev = d;
			break;
		}
	}

	switch (res_cfg->type) {
	case I10NM:
		bank              = m->bank - 13;
		res->imc          = bank / 4;
		res->channel      = bank % 2;
		res->column       = GET_BITFIELD(m->misc, 9, 18) << 2;
		res->row          = GET_BITFIELD(m->misc, 19, 39);
		res->bank_group   = GET_BITFIELD(m->misc, 40, 41);
		res->bank_address = GET_BITFIELD(m->misc, 42, 43);
		res->bank_group  |= GET_BITFIELD(m->misc, 44, 44) << 2;
		res->rank         = GET_BITFIELD(m->misc, 56, 58);
		res->dimm         = res->rank >> 2;
		res->rank         = res->rank % 4;
		break;
	case SPR:
		bank              = m->bank - 13;
		res->imc          = bank / 2;
		res->channel      = bank % 2;
		res->column       = GET_BITFIELD(m->misc, 9, 18) << 2;
		res->row          = GET_BITFIELD(m->misc, 19, 36);
		res->bank_group   = GET_BITFIELD(m->misc, 37, 38);
		res->bank_address = GET_BITFIELD(m->misc, 39, 40);
		res->bank_group  |= GET_BITFIELD(m->misc, 41, 41) << 2;
		res->rank         = GET_BITFIELD(m->misc, 57, 57);
		res->dimm         = GET_BITFIELD(m->misc, 58, 58);
		break;
	default:
		return false;
	}

	if (!res->dev) {
		skx_printk(KERN_ERR, "No device for src_id %d imc %d\n",
			   m->socketid, res->imc);
		return false;
	}

	return true;
}
/**
 * get_gnr_mdev() - Get the PCI device of the @logical_idx-th DDR memory controller.
 *
 * @d            : The pointer to the structure of CPU socket EDAC device.
 * @logical_idx  : The logical index of the present memory controller (0 ~ max present MC# - 1).
 * @physical_idx : To store the corresponding physical index of @logical_idx.
 *
 * RETURNS       : The PCI device of the @logical_idx-th DDR memory controller, NULL on failure.
 */
static struct pci_dev *get_gnr_mdev(struct skx_dev *d, int logical_idx, int *physical_idx)
{
#define GNR_MAX_IMC_PCI_CNT	28

	struct pci_dev *mdev;
	int i, logical = 0;

	/*
	 * Detect present memory controllers from { PCI device: 8-5, function 7-1 }
	 */
	for (i = 0; i < GNR_MAX_IMC_PCI_CNT; i++) {
		mdev = pci_get_dev_wrapper(d->seg,
					   d->bus[res_cfg->ddr_mdev_bdf.bus],
					   res_cfg->ddr_mdev_bdf.dev + i / 7,
					   res_cfg->ddr_mdev_bdf.fun + i % 7);

		if (mdev) {
			if (logical == logical_idx) {
				*physical_idx = i;
				return mdev;
			}

			pci_dev_put(mdev);
			logical++;
		}
	}

	return NULL;
}
/**
 * get_ddr_munit() - Get the resource of the i-th DDR memory controller.
 *
 * @d      : The pointer to the structure of CPU socket EDAC device.
 * @i      : The index of the CPU socket relative DDR memory controller.
 * @offset : To store the MMIO offset of the i-th DDR memory controller.
 * @size   : To store the MMIO size of the i-th DDR memory controller.
 *
 * RETURNS : The PCI device of the i-th DDR memory controller, NULL on failure.
 */
static struct pci_dev *get_ddr_munit(struct skx_dev *d, int i, u32 *offset, unsigned long *size)
{
	struct pci_dev *mdev;
	int physical_idx;
	u32 reg;

	switch (res_cfg->type) {
	case GNR:
		if (I10NM_GET_IMC_BAR(d, 0, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get mc0 bar\n");
			return NULL;
		}

		mdev = get_gnr_mdev(d, i, &physical_idx);
		if (!mdev)
			return NULL;

		*offset = I10NM_GET_IMC_MMIO_OFFSET(reg) +
			  I10NM_GNR_IMC_MMIO_OFFSET +
			  physical_idx * I10NM_GNR_IMC_MMIO_SIZE;
		*size   = I10NM_GNR_IMC_MMIO_SIZE;

		break;
	default:
		if (I10NM_GET_IMC_BAR(d, i, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get mc%d bar\n", i);
			return NULL;
		}

		mdev = pci_get_dev_wrapper(d->seg,
					   d->bus[res_cfg->ddr_mdev_bdf.bus],
					   res_cfg->ddr_mdev_bdf.dev + i,
					   res_cfg->ddr_mdev_bdf.fun);
		if (!mdev)
			return NULL;

		*offset = I10NM_GET_IMC_MMIO_OFFSET(reg);
		*size   = I10NM_GET_IMC_MMIO_SIZE(reg);
	}

	return mdev;
}
/**
 * i10nm_imc_absent() - Check whether the memory controller @imc is absent
 *
 * @imc    : The pointer to the structure of memory controller EDAC device.
 *
 * RETURNS : true if the memory controller EDAC device is absent, false otherwise.
 */
static bool i10nm_imc_absent(struct skx_imc *imc)
{
	u32 mcmtr;
	int i;

	switch (res_cfg->type) {
	case SPR:
		for (i = 0; i < res_cfg->ddr_chan_num; i++) {
			mcmtr = I10NM_GET_MCMTR(imc, i);
			edac_dbg(1, "ch%d mcmtr reg %x\n", i, mcmtr);
			if (mcmtr != ~0)
				return false;
		}

		/*
		 * Some workstations' absent memory controllers still
		 * appear as PCIe devices, misleading the EDAC driver.
		 * We have observed that the MMIO registers of these
		 * absent memory controllers consistently read as ~0,
		 * so a memory controller is identified as absent when
		 * its "mcmtr" register reads ~0 in all of its channels.
		 */
		return true;
	default:
		return false;
	}
}
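/*
 * Map the MMIO windows of all present DDR memory controllers: grab the
 * per-socket "util_all" and "uracu" devices, read the socket MMIO base
 * from the uracu BAR, then ioremap() each controller's register window
 * and record it in d->imc[].
 */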
static int i10nm_get_ddr_munits(void)
{
	struct pci_dev *mdev;
	void __iomem *mbase;
	unsigned long size;
	struct skx_dev *d;
	int i, lmc, j = 0;
	u32 reg, off;
	u64 base;

	list_for_each_entry(d, i10nm_edac_list, list) {
		d->util_all = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->util_all_bdf.bus],
						  res_cfg->util_all_bdf.dev,
						  res_cfg->util_all_bdf.fun);
		if (!d->util_all)
			return -ENODEV;

		d->uracu = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->uracu_bdf.bus],
					       res_cfg->uracu_bdf.dev,
					       res_cfg->uracu_bdf.fun);
		if (!d->uracu)
			return -ENODEV;

		if (I10NM_GET_SCK_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get socket bar\n");
			return -ENODEV;
		}

		base = I10NM_GET_SCK_MMIO_BASE(reg);
		edac_dbg(2, "socket%d mmio base 0x%llx (reg 0x%x)\n",
			 j++, base, reg);

		for (lmc = 0, i = 0; i < res_cfg->ddr_imc_num; i++) {
			mdev = get_ddr_munit(d, i, &off, &size);

			if (i == 0 && !mdev) {
				i10nm_printk(KERN_ERR, "No IMC found\n");
				return -ENODEV;
			}
			if (!mdev)
				continue;

			edac_dbg(2, "mc%d mmio base 0x%llx size 0x%lx (reg 0x%x)\n",
				 i, base + off, size, reg);

			mbase = ioremap(base + off, size);
			if (!mbase) {
				i10nm_printk(KERN_ERR, "Failed to ioremap 0x%llx\n",
					     base + off);
				return -ENODEV;
			}

			d->imc[lmc].mbase = mbase;
			if (i10nm_imc_absent(&d->imc[lmc])) {
				pci_dev_put(mdev);
				iounmap(mbase);
				d->imc[lmc].mbase = NULL;
				edac_dbg(2, "Skip absent mc%d\n", i);
				continue;
			}

			d->imc[lmc].mdev = mdev;
			lmc++;
		}
	}

	return 0;
}
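/*
 * HBM presence is reported by the CAPID3 register of the pcu_cr3 device;
 * see I10NM_IS_HBM_PRESENT() above (any non-zero value in bits 30:27 is
 * taken to mean HBM is present).
 */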
static bool i10nm_check_hbm_imc(struct skx_dev *d)
{
	u32 reg;

	if (I10NM_GET_CAPID3_CFG(d, reg)) {
		i10nm_printk(KERN_ERR, "Failed to get capid3_cfg\n");
		return false;
	}

	return I10NM_IS_HBM_PRESENT(reg) != 0;
}
static int i10nm_get_hbm_munits(void)
{
	struct pci_dev *mdev;
	void __iomem *mbase;
	u32 reg, off, mcmtr;
	struct skx_dev *d;
	int i, lmc;
	u64 base;

	list_for_each_entry(d, i10nm_edac_list, list) {
		if (!d->pcu_cr3)
			return -ENODEV;

		if (!i10nm_check_hbm_imc(d)) {
			i10nm_printk(KERN_DEBUG, "No hbm memory\n");
			return -ENODEV;
		}

		if (I10NM_GET_SCK_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get socket bar\n");
			return -ENODEV;
		}
		base = I10NM_GET_SCK_MMIO_BASE(reg);

		if (I10NM_GET_HBM_IMC_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to get hbm mc bar\n");
			return -ENODEV;
		}
		base += I10NM_GET_HBM_IMC_MMIO_OFFSET(reg);

		lmc = res_cfg->ddr_imc_num;

		for (i = 0; i < res_cfg->hbm_imc_num; i++) {
			mdev = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->hbm_mdev_bdf.bus],
						   res_cfg->hbm_mdev_bdf.dev + i / 4,
						   res_cfg->hbm_mdev_bdf.fun + i % 4);

			if (i == 0 && !mdev) {
				i10nm_printk(KERN_ERR, "No hbm mc found\n");
				return -ENODEV;
			}
			if (!mdev)
				continue;

			d->imc[lmc].mdev = mdev;
			off = i * I10NM_HBM_IMC_MMIO_SIZE;

			edac_dbg(2, "hbm mc%d mmio base 0x%llx size 0x%x\n",
				 lmc, base + off, I10NM_HBM_IMC_MMIO_SIZE);

			mbase = ioremap(base + off, I10NM_HBM_IMC_MMIO_SIZE);
			if (!mbase) {
				pci_dev_put(d->imc[lmc].mdev);
				d->imc[lmc].mdev = NULL;

				i10nm_printk(KERN_ERR, "Failed to ioremap for hbm mc 0x%llx\n",
					     base + off);
				return -ENOMEM;
			}

			d->imc[lmc].mbase = mbase;
			d->imc[lmc].hbm_mc = true;

			mcmtr = I10NM_GET_MCMTR(&d->imc[lmc], 0);
			if (!I10NM_IS_HBM_IMC(mcmtr)) {
				iounmap(d->imc[lmc].mbase);
				d->imc[lmc].mbase = NULL;
				d->imc[lmc].hbm_mc = false;
				pci_dev_put(d->imc[lmc].mdev);
				d->imc[lmc].mdev = NULL;

				i10nm_printk(KERN_ERR, "This isn't an hbm mc!\n");
				return -ENODEV;
			}

			lmc++;
		}
	}

	return 0;
}
static struct res_config i10nm_cfg0 = {
	.type			= I10NM,
	.decs_did		= 0x3452,
	.busno_cfg_offset	= 0xcc,
	.ddr_imc_num		= 4,
	.ddr_chan_num		= 2,
	.ddr_dimm_num		= 2,
	.ddr_chan_mmio_sz	= 0x4000,
	.sad_all_bdf		= {1, 29, 0},
	.pcu_cr3_bdf		= {1, 30, 3},
	.util_all_bdf		= {1, 29, 1},
	.uracu_bdf		= {0, 0, 1},
	.ddr_mdev_bdf		= {0, 12, 0},
	.hbm_mdev_bdf		= {0, 12, 1},
	.sad_all_offset		= 0x108,
	.offsets_scrub		= offsets_scrub_icx,
	.offsets_demand		= offsets_demand_icx,
};
static struct res_config i10nm_cfg1 = {
	.type			= I10NM,
	.decs_did		= 0x3452,
	.busno_cfg_offset	= 0xd0,
	.ddr_imc_num		= 4,
	.ddr_chan_num		= 2,
	.ddr_dimm_num		= 2,
	.ddr_chan_mmio_sz	= 0x4000,
	.sad_all_bdf		= {1, 29, 0},
	.pcu_cr3_bdf		= {1, 30, 3},
	.util_all_bdf		= {1, 29, 1},
	.uracu_bdf		= {0, 0, 1},
	.ddr_mdev_bdf		= {0, 12, 0},
	.hbm_mdev_bdf		= {0, 12, 1},
	.sad_all_offset		= 0x108,
	.offsets_scrub		= offsets_scrub_icx,
	.offsets_demand		= offsets_demand_icx,
};
static struct res_config spr_cfg = {
	.type			= SPR,
	.decs_did		= 0x3252,
	.busno_cfg_offset	= 0xd0,
	.ddr_imc_num		= 4,
	.ddr_chan_num		= 2,
	.ddr_dimm_num		= 2,
	.hbm_imc_num		= 16,
	.hbm_chan_num		= 2,
	.hbm_dimm_num		= 1,
	.ddr_chan_mmio_sz	= 0x8000,
	.hbm_chan_mmio_sz	= 0x4000,
	.support_ddr5		= true,
	.sad_all_bdf		= {1, 10, 0},
	.pcu_cr3_bdf		= {1, 30, 3},
	.util_all_bdf		= {1, 29, 1},
	.uracu_bdf		= {0, 0, 1},
	.ddr_mdev_bdf		= {0, 12, 0},
	.hbm_mdev_bdf		= {0, 12, 1},
	.sad_all_offset		= 0x300,
	.offsets_scrub		= offsets_scrub_spr,
	.offsets_scrub_hbm0	= offsets_scrub_spr_hbm0,
	.offsets_scrub_hbm1	= offsets_scrub_spr_hbm1,
	.offsets_demand		= offsets_demand_spr,
	.offsets_demand2	= offsets_demand2_spr,
	.offsets_demand_hbm0	= offsets_demand_spr_hbm0,
	.offsets_demand_hbm1	= offsets_demand_spr_hbm1,
};
static struct res_config gnr_cfg = {
	.type			= GNR,
	.decs_did		= 0x3252,
	.busno_cfg_offset	= 0xd0,
	.ddr_imc_num		= 12,
	.ddr_chan_num		= 1,
	.ddr_dimm_num		= 2,
	.ddr_chan_mmio_sz	= 0x4000,
	.support_ddr5		= true,
	.sad_all_bdf		= {0, 13, 0},
	.pcu_cr3_bdf		= {0, 5, 0},
	.util_all_bdf		= {0, 13, 1},
	.uracu_bdf		= {0, 0, 1},
	.ddr_mdev_bdf		= {0, 5, 1},
	.sad_all_offset		= 0x300,
};
static const struct x86_cpu_id i10nm_cpuids[] = {
	X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D,	X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
	X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_TREMONT_D,	X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
	X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X,	X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
	X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_X,	X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
	X86_MATCH_VFM_STEPPINGS(INTEL_ICELAKE_D,	X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
	X86_MATCH_VFM_STEPPINGS(INTEL_SAPPHIRERAPIDS_X,	X86_STEPPINGS(0x0, 0xf), &spr_cfg),
	X86_MATCH_VFM_STEPPINGS(INTEL_EMERALDRAPIDS_X,	X86_STEPPINGS(0x0, 0xf), &spr_cfg),
	X86_MATCH_VFM_STEPPINGS(INTEL_GRANITERAPIDS_X,	X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
	X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT_X,	X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
	X86_MATCH_VFM_STEPPINGS(INTEL_ATOM_CRESTMONT,	X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
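/* ECC is treated as enabled on a channel when bit 2 of its MCMTR register is set. */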
static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
{
	u32 mcmtr;

	mcmtr = I10NM_GET_MCMTR(imc, chan);
	edac_dbg(1, "ch%d mcmtr reg %x\n", chan, mcmtr);

	return !!GET_BITFIELD(mcmtr, 2, 2);
}
static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
				 struct res_config *cfg)
{
	struct skx_pvt *pvt = mci->pvt_info;
	struct skx_imc *imc = pvt->imc;
	u32 mtr, mcddrtcfg = 0;
	struct dimm_info *dimm;
	int i, j, ndimms;

	for (i = 0; i < imc->num_channels; i++) {
		if (!imc->mbase)
			continue;

		ndimms = 0;

		if (res_cfg->type != GNR)
			mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);

		for (j = 0; j < imc->num_dimms; j++) {
			dimm = edac_get_dimm(mci, i, j, 0);
			mtr = I10NM_GET_DIMMMTR(imc, i, j);
			edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
				 mtr, mcddrtcfg, imc->mc, i, j);

			if (IS_DIMM_PRESENT(mtr))
				ndimms += skx_get_dimm_info(mtr, 0, 0, dimm,
							    imc, i, j, cfg);
			else if (IS_NVDIMM_PRESENT(mcddrtcfg, j))
				ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
							      EDAC_MOD_STR);
		}
		if (ndimms && !i10nm_check_ecc(imc, i)) {
			i10nm_printk(KERN_ERR, "ECC is disabled on imc %d channel %d\n",
				     imc->mc, i);
			return -ENODEV;
		}
	}

	return 0;
}
static struct notifier_block i10nm_mce_dec = {
	.notifier_call	= skx_mce_check_error,
	.priority	= MCE_PRIO_EDAC,
};
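/*
 * Module init: match the running CPU against i10nm_cpuids[], enumerate the
 * per-socket bus mappings, probe the DDR (and, where present, HBM) memory
 * controllers, register one EDAC MC device per controller, and finally hook
 * into the MCE decode chain. Failures after bus enumeration unwind through
 * skx_remove().
 */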
static int __init i10nm_init(void)
{
	u8 mc = 0, src_id = 0, node_id = 0;
	const struct x86_cpu_id *id;
	struct res_config *cfg;
	const char *owner;
	struct skx_dev *d;
	int rc, i, off[3] = {0xd0, 0xc8, 0xcc};
	u64 tolm, tohm;
	int imc_num;

	edac_dbg(2, "\n");

	if (ghes_get_devices())
		return -EBUSY;

	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(i10nm_cpuids);
	if (!id)
		return -ENODEV;

	cfg = (struct res_config *)id->driver_data;
	skx_set_res_cfg(cfg);
	res_cfg = cfg;

	rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);
	if (rc)
		return rc;

	rc = skx_get_all_bus_mappings(cfg, &i10nm_edac_list);
	if (rc < 0)
		goto fail;
	if (rc == 0) {
		i10nm_printk(KERN_ERR, "No memory controllers found\n");
		return -ENODEV;
	}

	rc = i10nm_get_imc_num(cfg);
	if (rc < 0)
		goto fail;

	mem_cfg_2lm = i10nm_check_2lm(cfg);
	skx_set_mem_cfg(mem_cfg_2lm);

	rc = i10nm_get_ddr_munits();

	if (i10nm_get_hbm_munits() && rc)
		goto fail;

	imc_num = res_cfg->ddr_imc_num + res_cfg->hbm_imc_num;

	list_for_each_entry(d, i10nm_edac_list, list) {
		rc = skx_get_src_id(d, 0xf8, &src_id);
		if (rc < 0)
			goto fail;

		rc = skx_get_node_id(d, &node_id);
		if (rc < 0)
			goto fail;

		edac_dbg(2, "src_id = %d node_id = %d\n", src_id, node_id);
		for (i = 0; i < imc_num; i++) {
			if (!d->imc[i].mdev)
				continue;

			d->imc[i].mc  = mc++;
			d->imc[i].lmc = i;
			d->imc[i].src_id  = src_id;
			d->imc[i].node_id = node_id;
			if (d->imc[i].hbm_mc) {
				d->imc[i].chan_mmio_sz = cfg->hbm_chan_mmio_sz;
				d->imc[i].num_channels = cfg->hbm_chan_num;
				d->imc[i].num_dimms    = cfg->hbm_dimm_num;
			} else {
				d->imc[i].chan_mmio_sz = cfg->ddr_chan_mmio_sz;
				d->imc[i].num_channels = cfg->ddr_chan_num;
				d->imc[i].num_dimms    = cfg->ddr_dimm_num;
			}

			rc = skx_register_mci(&d->imc[i], d->imc[i].mdev,
					      "Intel_10nm Socket", EDAC_MOD_STR,
					      i10nm_get_dimm_config, cfg);
			if (rc < 0)
				goto fail;
		}
	}

	rc = skx_adxl_get();
	if (rc)
		goto fail;

	opstate_init();
	mce_register_decode_chain(&i10nm_mce_dec);
	skx_setup_debug("i10nm_test");

	if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
		skx_set_decode(i10nm_mc_decode, show_retry_rd_err_log);
		if (retry_rd_err_log == 2)
			enable_retry_rd_err_log(true);
	} else {
		skx_set_decode(i10nm_mc_decode, NULL);
	}

	i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION);

	return 0;
fail:
	skx_remove();
	return rc;
}
static void __exit i10nm_exit(void)
{
	edac_dbg(2, "\n");

	if (retry_rd_err_log && res_cfg->offsets_scrub && res_cfg->offsets_demand) {
		skx_set_decode(NULL, NULL);
		if (retry_rd_err_log == 2)
			enable_retry_rd_err_log(false);
	}

	skx_teardown_debug();
	mce_unregister_decode_chain(&i10nm_mce_dec);
	skx_remove();
}

module_init(i10nm_init);
module_exit(i10nm_exit);
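/*
 * Setter for the "decoding_via_mca" module parameter: accept only 0 or 1,
 * and refuse to enable MCA-based decoding on 2-level-memory systems, which
 * the decoder in this driver does not support yet.
 */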
static int set_decoding_via_mca(const char *buf, const struct kernel_param *kp)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 0, &val);
	if (ret || val > 1)
		return -EINVAL;

	if (val && mem_cfg_2lm) {
		i10nm_printk(KERN_NOTICE, "Decoding errors via MCA banks for 2LM isn't supported yet\n");
		return -EIO;
	}

	ret = param_set_int(buf, kp);

	return ret;
}

static const struct kernel_param_ops decoding_via_mca_param_ops = {
	.set = set_decoding_via_mca,
	.get = param_get_int,
};

module_param_cb(decoding_via_mca, &decoding_via_mca_param_ops, &decoding_via_mca, 0644);
MODULE_PARM_DESC(decoding_via_mca, "decoding_via_mca: 0=off(default), 1=enable");

module_param(retry_rd_err_log, int, 0444);
MODULE_PARM_DESC(retry_rd_err_log, "retry_rd_err_log: 0=off(default), 1=bios(Linux doesn't reset any control bits, but just reports values), 2=linux(Linux tries to take control, resets mode bits, and clears valid/UC bits after reading)");

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MC Driver for Intel 10nm server processors");