/*
 * Intel X38 Memory Controller kernel module
 * Copyright (C) 2008 Cluster Computing, Inc.
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * This file is based on i3200_edac.c
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_core.h"

#define X38_REVISION		"1.1"

#define EDAC_MOD_STR		"x38_edac"

#define PCI_DEVICE_ID_INTEL_X38_HB	0x29e0

#define X38_RANKS		8
#define X38_RANKS_PER_CHANNEL	4
#define X38_CHANNELS		2

/* Intel X38 register addresses - device 0 function 0 - DRAM Controller */

#define X38_MCHBAR_LOW		0x48	/* MCH Memory Mapped Register BAR */
#define X38_MCHBAR_HIGH		0x4c
#define X38_MCHBAR_MASK		0xfffffc000ULL	/* bits 35:14 */
#define X38_MMR_WINDOW_SIZE	16384

#define X38_TOM			0xa0	/* Top of Memory (16b)
					 *
					 * 15:10 reserved
					 *  9:0  total populated physical memory
					 */
#define X38_TOM_MASK		0x3ff	/* bits 9:0 */
#define X38_TOM_SHIFT		26	/* 64MiB grain */

#define X38_ERRSTS		0xc8	/* Error Status Register (16b)
					 *
					 * 15    reserved
					 * 14    Isochronous TBWRR Run Behind FIFO Full
					 *       (ITCV)
					 * 13    Isochronous TBWRR Run Behind FIFO Put
					 *       (ITSTV)
					 * 12    reserved
					 * 11    MCH Thermal Sensor Event
					 *       for SMI/SCI/SERR (GTSE)
					 * 10    reserved
					 *  9    LOCK to non-DRAM Memory Flag (LCKF)
					 *  8    reserved
					 *  7    DRAM Throttle Flag (DTF)
					 *  6:2  reserved
					 *  1    Multi-bit DRAM ECC Error Flag (DMERR)
					 *  0    Single-bit DRAM ECC Error Flag (DSERR)
					 */
#define X38_ERRSTS_UE		0x0002
#define X38_ERRSTS_CE		0x0001
#define X38_ERRSTS_BITS		(X38_ERRSTS_UE | X38_ERRSTS_CE)


/* Intel MMIO register space - device 0 function 0 - MMR space */

#define X38_C0DRB		0x200	/* Channel 0 DRAM Rank Boundary (16b x 4)
					 *
					 * 15:10 reserved
					 *  9:0  Channel 0 DRAM Rank Boundary Address
					 */
#define X38_C1DRB		0x600	/* Channel 1 DRAM Rank Boundary (16b x 4) */
#define X38_DRB_MASK		0x3ff	/* bits 9:0 */
#define X38_DRB_SHIFT		26	/* 64MiB grain */

#define X38_C0ECCERRLOG		0x280	/* Channel 0 ECC Error Log (64b)
					 *
					 * 63:48 Error Column Address (ERRCOL)
					 * 47:32 Error Row Address (ERRROW)
					 * 31:29 Error Bank Address (ERRBANK)
					 * 28:27 Error Rank Address (ERRRANK)
					 * 26:24 reserved
					 * 23:16 Error Syndrome (ERRSYND)
					 * 15: 2 reserved
					 *  1    Multiple Bit Error Status (MERRSTS)
					 *  0    Correctable Error Status (CERRSTS)
					 */
#define X38_C1ECCERRLOG		0x680	/* Channel 1 ECC Error Log (64b) */
#define X38_ECCERRLOG_CE		0x1
#define X38_ECCERRLOG_UE		0x2
#define X38_ECCERRLOG_RANK_BITS		0x18000000
#define X38_ECCERRLOG_SYNDROME_BITS	0xff0000

#define X38_CAPID0		0xe0	/* see P.94 of spec for details */

static int x38_channel_num;

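/*
 * Check the DCD (Dual Channel Disable) bit in the 8th byte of CAPID0 to
 * work out whether the controller runs with one or two DRAM channels,
 * and cache the result in x38_channel_num.
 */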
static int how_many_channel(struct pci_dev *pdev)
{
	unsigned char capid0_8b; /* 8th byte of CAPID0 */

	pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b);
	if (capid0_8b & 0x20) {	/* check DCD: Dual Channel Disable */
		edac_dbg(0, "In single channel mode\n");
		x38_channel_num = 1;
	} else {
		edac_dbg(0, "In dual channel mode\n");
		x38_channel_num = 2;
	}

	return x38_channel_num;
}

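/*
 * Helpers for decoding the C0/C1ECCERRLOG registers: extract the error
 * syndrome (bits 23:16) and map the failing rank (bits 28:27) plus the
 * channel number onto the flat csrow index used by the EDAC core
 * (four ranks per channel).
 */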
static unsigned long eccerrlog_syndrome(u64 log)
{
	return (log & X38_ECCERRLOG_SYNDROME_BITS) >> 16;
}

static int eccerrlog_row(int channel, u64 log)
{
	return ((log & X38_ECCERRLOG_RANK_BITS) >> 27) |
		(channel * X38_RANKS_PER_CHANNEL);
}

enum x38_chips {
	X38 = 0,
};

struct x38_dev_info {
	const char *ctl_name;
};

struct x38_error_info {
	u16 errsts;
	u16 errsts2;
	u64 eccerrlog[X38_CHANNELS];
};

static const struct x38_dev_info x38_devs[] = {
	[X38] = {
		.ctl_name = "x38"},
};

static struct pci_dev *mci_pdev;
static int x38_registered = 1;


static void x38_clear_error_info(struct mem_ctl_info *mci)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(mci->pdev);

	/*
	 * Clear any error bits.
	 * (Yes, we really clear bits by writing 1 to them.)
	 */
	pci_write_bits16(pdev, X38_ERRSTS, X38_ERRSTS_BITS,
			 X38_ERRSTS_BITS);
}

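/*
 * Read a 64-bit MMR as two 32-bit reads, low dword first. The combined
 * read is not atomic, which is presumably why the caller samples ERRSTS
 * both before and after reading the error logs.
 */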
static u64 x38_readq(const void __iomem *addr)
{
	return readl(addr) | (((u64)readl(addr + 4)) << 32);
}

static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
				struct x38_error_info *info)
{
	struct pci_dev *pdev;
	void __iomem *window = mci->pvt_info;

	pdev = to_pci_dev(mci->pdev);

	/*
	 * This is a mess because there is no atomic way to read all the
	 * registers at once and the registers can transition from CE being
	 * overwritten by UE.
	 */
	pci_read_config_word(pdev, X38_ERRSTS, &info->errsts);
	if (!(info->errsts & X38_ERRSTS_BITS))
		return;

	info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
	if (x38_channel_num == 2)
		info->eccerrlog[1] = x38_readq(window + X38_C1ECCERRLOG);

	pci_read_config_word(pdev, X38_ERRSTS, &info->errsts2);

	/*
	 * If the error is the same for both reads then the first set
	 * of reads is valid.  If there is a change then there is a CE
	 * with no info and the second set of reads is valid and
	 * should be UE info.
	 */
	if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
		info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
		if (x38_channel_num == 2)
			info->eccerrlog[1] =
				x38_readq(window + X38_C1ECCERRLOG);
	}

	x38_clear_error_info(mci);
}

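/*
 * Report the errors captured in x38_error_info to the EDAC core.  If
 * ERRSTS changed between the two reads, the log registers may have been
 * overwritten, so an uncorrected error is reported without address
 * details ("UE overwrote CE").
 */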
static void x38_process_error_info(struct mem_ctl_info *mci,
				struct x38_error_info *info)
{
	int channel;
	u64 log;

	if (!(info->errsts & X38_ERRSTS_BITS))
		return;

	if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
				     -1, -1, -1,
				     "UE overwrote CE", "");
		info->errsts = info->errsts2;
	}

	for (channel = 0; channel < x38_channel_num; channel++) {
		log = info->eccerrlog[channel];
		if (log & X38_ECCERRLOG_UE) {
			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
					     0, 0, 0,
					     eccerrlog_row(channel, log),
					     -1, -1,
					     "x38 UE", "");
		} else if (log & X38_ECCERRLOG_CE) {
			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
					     0, 0, eccerrlog_syndrome(log),
					     eccerrlog_row(channel, log),
					     -1, -1,
					     "x38 CE", "");
		}
	}
}

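/* Polling entry point, invoked periodically by the EDAC core. */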
static void x38_check(struct mem_ctl_info *mci)
{
	struct x38_error_info info;

	edac_dbg(1, "MC%d\n", mci->mc_idx);
	x38_get_and_clear_error_info(mci, &info);
	x38_process_error_info(mci, &info);
}


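/*
 * Read the MCHBAR low/high registers, set the enable bit in the low
 * dword, and ioremap the 16KB memory-mapped register window.  Returns
 * NULL if the BAR lies outside the addressable range or the mapping
 * fails.
 */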
static void __iomem *x38_map_mchbar(struct pci_dev *pdev)
{
	union {
		u64 mchbar;
		struct {
			u32 mchbar_low;
			u32 mchbar_high;
		};
	} u;
	void __iomem *window;

	pci_read_config_dword(pdev, X38_MCHBAR_LOW, &u.mchbar_low);
	pci_write_config_dword(pdev, X38_MCHBAR_LOW, u.mchbar_low | 0x1);
	pci_read_config_dword(pdev, X38_MCHBAR_HIGH, &u.mchbar_high);
	u.mchbar &= X38_MCHBAR_MASK;

	if (u.mchbar != (resource_size_t)u.mchbar) {
		printk(KERN_ERR
			"x38: mmio space beyond accessible range (0x%llx)\n",
			(unsigned long long)u.mchbar);
		return NULL;
	}

	window = ioremap_nocache(u.mchbar, X38_MMR_WINDOW_SIZE);
	if (!window)
		printk(KERN_ERR "x38: cannot map mmio space at 0x%llx\n",
			(unsigned long long)u.mchbar);

	return window;
}


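/* Snapshot the four DRAM Rank Boundary registers of each channel. */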
static void x38_get_drbs(void __iomem *window,
			u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
{
	int i;

	for (i = 0; i < X38_RANKS_PER_CHANNEL; i++) {
		drbs[0][i] = readw(window + X38_C0DRB + 2*i) & X38_DRB_MASK;
		drbs[1][i] = readw(window + X38_C1DRB + 2*i) & X38_DRB_MASK;
	}
}

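/*
 * If the last rank boundary of channel 1 matches the Top of Memory
 * register, the two channels are presumably mapped back to back
 * ("stacked") rather than interleaved; drb_to_nr_pages() uses this to
 * avoid double-counting channel 0's memory.
 */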
static bool x38_is_stacked(struct pci_dev *pdev,
			u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL])
{
	u16 tom;

	pci_read_config_word(pdev, X38_TOM, &tom);
	tom &= X38_TOM_MASK;

	return drbs[X38_CHANNELS - 1][X38_RANKS_PER_CHANNEL - 1] == tom;
}

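/*
 * Turn the cumulative DRB value of a rank into a page count by
 * subtracting the previous rank's boundary.  In stacked mode channel 1's
 * boundaries appear to include channel 0's memory as well, so channel 0's
 * total is subtracted again for the rank(s) that reach channel 1's top
 * boundary.
 */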
static unsigned long drb_to_nr_pages(
			u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL],
			bool stacked, int channel, int rank)
{
	int n;

	n = drbs[channel][rank];
	if (rank > 0)
		n -= drbs[channel][rank - 1];
	if (stacked && (channel == 1) &&
	    drbs[channel][rank] == drbs[channel][X38_RANKS_PER_CHANNEL - 1]) {
		n -= drbs[0][X38_RANKS_PER_CHANNEL - 1];
	}

	n <<= (X38_DRB_SHIFT - PAGE_SHIFT);
	return n;
}

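/*
 * Core probe routine: map the MCHBAR window, detect the channel count,
 * allocate a mem_ctl_info with a csrow/channel layer layout, size each
 * rank from the DRB registers, and register the controller with the
 * EDAC core.  The MMR window is stashed in mci->pvt_info (see the
 * FIXME below).
 */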
static int x38_probe1(struct pci_dev *pdev, int dev_idx)
{
	int rc;
	int i, j;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
	bool stacked;
	void __iomem *window;

	edac_dbg(0, "MC:\n");

	window = x38_map_mchbar(pdev);
	if (!window)
		return -ENODEV;

	x38_get_drbs(window, drbs);

	how_many_channel(pdev);

	/* FIXME: unconventional pvt_info usage */
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = X38_RANKS;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = x38_channel_num;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		return -ENOMEM;

	edac_dbg(3, "MC: init mci\n");

	mci->pdev = &pdev->dev;
	mci->mtype_cap = MEM_FLAG_DDR2;

	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;

	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = X38_REVISION;
	mci->ctl_name = x38_devs[dev_idx].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = x38_check;
	mci->ctl_page_to_phys = NULL;
	mci->pvt_info = window;

	stacked = x38_is_stacked(pdev, drbs);

	/*
	 * The dram rank boundary (DRB) reg values are boundary addresses
	 * for each DRAM rank with a granularity of 64MB.  DRB regs are
	 * cumulative; the last one will contain the total memory
	 * contained in all ranks.
	 */
	for (i = 0; i < mci->nr_csrows; i++) {
		unsigned long nr_pages;
		struct csrow_info *csrow = mci->csrows[i];

		nr_pages = drb_to_nr_pages(drbs, stacked,
			i / X38_RANKS_PER_CHANNEL,
			i % X38_RANKS_PER_CHANNEL);

		if (nr_pages == 0)
			continue;

		for (j = 0; j < x38_channel_num; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;

			dimm->nr_pages = nr_pages / x38_channel_num;
			dimm->grain = nr_pages << PAGE_SHIFT;
			dimm->mtype = MEM_DDR2;
			dimm->dtype = DEV_UNKNOWN;
			dimm->edac_mode = EDAC_UNKNOWN;
		}
	}

	x38_clear_error_info(mci);

	rc = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
		goto fail;
	}

	/* get this far and it's successful */
	edac_dbg(3, "MC: success\n");
	return 0;

fail:
	iounmap(window);
	if (mci)
		edac_mc_free(mci);

	return rc;
}

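/*
 * PCI probe callback: enable the device, run x38_probe1(), and keep a
 * reference to the host bridge in mci_pdev for module teardown.
 */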
static int x38_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int rc;

	edac_dbg(0, "MC:\n");

	if (pci_enable_device(pdev) < 0)
		return -EIO;

	rc = x38_probe1(pdev, ent->driver_data);
	if (!mci_pdev)
		mci_pdev = pci_dev_get(pdev);

	return rc;
}

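/* Unregister the controller, unmap the MMR window and free the mci. */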
static void x38_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;

	edac_dbg(0, "\n");

	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	iounmap(mci->pvt_info);

	edac_mc_free(mci);
}

static DEFINE_PCI_DEVICE_TABLE(x38_pci_tbl) = {
	{
		PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		X38},
	{
		0,
	}			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, x38_pci_tbl);

static struct pci_driver x38_driver = {
	.name = EDAC_MOD_STR,
	.probe = x38_init_one,
	.remove = x38_remove_one,
	.id_table = x38_pci_tbl,
};

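/*
 * Module init: register the PCI driver.  If no device was bound (e.g.
 * the host bridge was already claimed elsewhere), fall back to
 * pci_get_device() and probe it by hand; x38_registered records which
 * path was taken so x38_exit() can undo it.
 */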
static int __init x38_init(void)
{
	int pci_rc;

	edac_dbg(3, "MC:\n");

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&x38_driver);
	if (pci_rc < 0)
		goto fail0;

	if (!mci_pdev) {
		x38_registered = 0;
		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					PCI_DEVICE_ID_INTEL_X38_HB, NULL);
		if (!mci_pdev) {
			edac_dbg(0, "x38 pci_get_device fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}

		pci_rc = x38_init_one(mci_pdev, x38_pci_tbl);
		if (pci_rc < 0) {
			edac_dbg(0, "x38 init fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}
	}

	return 0;

fail1:
	pci_unregister_driver(&x38_driver);

fail0:
	if (mci_pdev)
		pci_dev_put(mci_pdev);

	return pci_rc;
}

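/*
 * Module exit: unregister the driver and, if the device was probed via
 * the manual fallback, tear it down and drop the extra reference.
 */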
static void __exit x38_exit(void)
{
	edac_dbg(3, "MC:\n");

	pci_unregister_driver(&x38_driver);
	if (!x38_registered) {
		x38_remove_one(mci_pdev);
		pci_dev_put(mci_pdev);
	}
}

module_init(x38_init);
module_exit(x38_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cluster Computing, Inc. Hitoshi Mitake");
MODULE_DESCRIPTION("MC support for Intel X38 memory hub controllers");

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");