Commit | Line | Data |
---|---|---|
2bc65418 | 1 | #include "amd64_edac.h" |
7d6034d3 | 2 | #include <asm/k8.h> |
2bc65418 DT |
3 | |
4 | static struct edac_pci_ctl_info *amd64_ctl_pci; | |
5 | ||
6 | static int report_gart_errors; | |
7 | module_param(report_gart_errors, int, 0644); | |
8 | ||
9 | /* | |
10 | * Set by command line parameter. If BIOS has enabled the ECC, this override is | |
11 | * cleared to prevent re-enabling the hardware by this driver. | |
12 | */ | |
13 | static int ecc_enable_override; | |
14 | module_param(ecc_enable_override, int, 0644); | |
15 | ||
16 | /* Lookup table for all possible MC control instances */ | |
17 | struct amd64_pvt; | |
3011b20d BP |
18 | static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES]; |
19 | static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES]; | |
2bc65418 | 20 | |
b70ef010 | 21 | /* |
1433eb99 BP |
22 | * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and |
23 | * later. | |
b70ef010 | 24 | */ |
1433eb99 BP |
25 | static int ddr2_dbam_revCG[] = { |
26 | [0] = 32, | |
27 | [1] = 64, | |
28 | [2] = 128, | |
29 | [3] = 256, | |
30 | [4] = 512, | |
31 | [5] = 1024, | |
32 | [6] = 2048, | |
33 | }; | |
34 | ||
35 | static int ddr2_dbam_revD[] = { | |
36 | [0] = 32, | |
37 | [1] = 64, | |
38 | [2 ... 3] = 128, | |
39 | [4] = 256, | |
40 | [5] = 512, | |
41 | [6] = 256, | |
42 | [7] = 512, | |
43 | [8 ... 9] = 1024, | |
44 | [10] = 2048, | |
45 | }; | |
46 | ||
47 | static int ddr2_dbam[] = { [0] = 128, | |
48 | [1] = 256, | |
49 | [2 ... 4] = 512, | |
50 | [5 ... 6] = 1024, | |
51 | [7 ... 8] = 2048, | |
52 | [9 ... 10] = 4096, | |
53 | [11] = 8192, | |
54 | }; | |
55 | ||
56 | static int ddr3_dbam[] = { [0] = -1, | |
57 | [1] = 256, | |
58 | [2] = 512, | |
59 | [3 ... 4] = -1, | |
60 | [5 ... 6] = 1024, | |
61 | [7 ... 8] = 2048, | |
62 | [9 ... 10] = 4096, | |
63 | [11] = 8192, | |
b70ef010 BP |
64 | }; |
65 | ||
66 | /* | |
67 | * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing | |
68 | * bandwidth to a valid bit pattern. The 'set' operation finds the closest | |
69 | * rate that does not exceed the requested bandwidth. | |
70 | * | |
71 | * FIXME: Produce a better mapping/linearisation. | |
72 | */ | |
73 | ||
74 | struct scrubrate scrubrates[] = { | |
75 | { 0x01, 1600000000UL}, | |
76 | { 0x02, 800000000UL}, | |
77 | { 0x03, 400000000UL}, | |
78 | { 0x04, 200000000UL}, | |
79 | { 0x05, 100000000UL}, | |
80 | { 0x06, 50000000UL}, | |
81 | { 0x07, 25000000UL}, | |
82 | { 0x08, 12284069UL}, | |
83 | { 0x09, 6274509UL}, | |
84 | { 0x0A, 3121951UL}, | |
85 | { 0x0B, 1560975UL}, | |
86 | { 0x0C, 781440UL}, | |
87 | { 0x0D, 390720UL}, | |
88 | { 0x0E, 195300UL}, | |
89 | { 0x0F, 97650UL}, | |
90 | { 0x10, 48854UL}, | |
91 | { 0x11, 24427UL}, | |
92 | { 0x12, 12213UL}, | |
93 | { 0x13, 6101UL}, | |
94 | { 0x14, 3051UL}, | |
95 | { 0x15, 1523UL}, | |
96 | { 0x16, 761UL}, | |
97 | { 0x00, 0UL}, /* scrubbing off */ | |
98 | }; | |
99 | ||
2bc65418 DT |
100 | /* |
101 | * Memory scrubber control interface. For K8, memory scrubbing is handled by | |
102 | * hardware and can involve L2 cache, dcache as well as the main memory. With | |
103 | * F10, this is extended to L3 cache scrubbing on CPU models sporting that | |
104 | * functionality. | |
105 | * | |
106 | * This causes the "units" for the scrubbing speed to vary from 64 byte blocks | |
107 | * (dram) over to cache lines. This is nasty, so we will use bandwidth in | |
108 | * bytes/sec for the setting. | |
109 | * | |
110 | * Currently, we only do dram scrubbing. If the scrubbing is done in software on | |
111 | * other archs, we might not have access to the caches directly. | |
112 | */ | |
113 | ||
114 | /* | |
115 | * Scan the scrub rate mapping table for a close or matching bandwidth value. | |
116 | * If the requested rate exceeds every entry, use the highest rate available. | |
117 | */ | |
118 | static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, | |
119 | u32 min_scrubrate) | |
120 | { | |
121 | u32 scrubval; | |
122 | int i; | |
123 | ||
124 | /* | |
125 | * map the configured rate (new_bw) to a value specific to the AMD64 | |
126 | * memory controller and apply it to the register. Search for the first | |
127 | * bandwidth entry that does not exceed the requested setting and | |
128 | * program that. If the search reaches the last entry, turn off DRAM scrubbing. | |
129 | */ | |
130 | for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { | |
131 | /* | |
132 | * skip scrub rates which aren't recommended | |
133 | * (see F10 BKDG, F3x58) | |
134 | */ | |
135 | if (scrubrates[i].scrubval < min_scrubrate) | |
136 | continue; | |
137 | ||
138 | if (scrubrates[i].bandwidth <= new_bw) | |
139 | break; | |
140 | ||
141 | /* | |
142 | * if no suitable bandwidth found, turn off DRAM scrubbing | |
143 | * entirely by falling back to the last element in the | |
144 | * scrubrates array. | |
145 | */ | |
146 | } | |
147 | ||
148 | scrubval = scrubrates[i].scrubval; | |
149 | if (scrubval) | |
150 | edac_printk(KERN_DEBUG, EDAC_MC, | |
151 | "Setting scrub rate bandwidth: %u\n", | |
152 | scrubrates[i].bandwidth); | |
153 | else | |
154 | edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n"); | |
155 | ||
156 | pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F); | |
157 | ||
158 | return 0; | |
159 | } | |
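/*
 * Editor's sketch (standalone userspace code, not part of the driver): a
 * minimal illustration of the table walk above. The table is a trimmed
 * copy of scrubrates[]; pick_scrubval() is a hypothetical helper name.
 * Entries below a family-specific minimum scrubval are skipped, and the
 * first remaining entry whose bandwidth does not exceed the request wins.
 */
#include <stdio.h>

struct rate { unsigned scrubval; unsigned long bandwidth; };

static const struct rate rates[] = {
	{ 0x01, 1600000000UL }, { 0x05, 100000000UL },
	{ 0x0A, 3121951UL },    { 0x00, 0UL },	/* 0x00 == scrubbing off */
};

static unsigned pick_scrubval(unsigned long new_bw, unsigned min_scrubval)
{
	unsigned i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		if (rates[i].scrubval && rates[i].scrubval < min_scrubval)
			continue;	/* rate not recommended on this family */
		if (rates[i].bandwidth <= new_bw)
			return rates[i].scrubval;
	}
	return 0x00;			/* nothing suitable: scrubbing off */
}

int main(void)
{
	/* ~200MB/s requested: first entry <= that is 100000000UL -> 0x05 */
	printf("scrubval = 0x%02x\n", pick_scrubval(200000000UL, 0x01));
	return 0;
}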
160 | ||
161 | static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth) | |
162 | { | |
163 | struct amd64_pvt *pvt = mci->pvt_info; | |
164 | u32 min_scrubrate = 0x0; | |
165 | ||
166 | switch (boot_cpu_data.x86) { | |
167 | case 0xf: | |
168 | min_scrubrate = K8_MIN_SCRUB_RATE_BITS; | |
169 | break; | |
170 | case 0x10: | |
171 | min_scrubrate = F10_MIN_SCRUB_RATE_BITS; | |
172 | break; | |
173 | case 0x11: | |
174 | min_scrubrate = F11_MIN_SCRUB_RATE_BITS; | |
175 | break; | |
176 | ||
177 | default: | |
178 | amd64_printk(KERN_ERR, "Unsupported family!\n"); | |
179 | break; | |
180 | } | |
181 | return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth, | |
182 | min_scrubrate); | |
183 | } | |
184 | ||
185 | static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw) | |
186 | { | |
187 | struct amd64_pvt *pvt = mci->pvt_info; | |
188 | u32 scrubval = 0; | |
6ba5dcdc | 189 | int status = -1, i; |
2bc65418 | 190 | |
6ba5dcdc | 191 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval); |
2bc65418 DT |
192 | |
193 | scrubval = scrubval & 0x001F; | |
194 | ||
195 | edac_printk(KERN_DEBUG, EDAC_MC, | |
196 | "pci-read, sdram scrub control value: %d \n", scrubval); | |
197 | ||
198 | for (i = 0; ARRAY_SIZE(scrubrates); i++) { | |
199 | if (scrubrates[i].scrubval == scrubval) { | |
200 | *bw = scrubrates[i].bandwidth; | |
201 | status = 0; | |
202 | break; | |
203 | } | |
204 | } | |
205 | ||
206 | return status; | |
207 | } | |
208 | ||
6775763a DT |
209 | /* Map from a CSROW entry to the mask entry that operates on it */ |
210 | static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow) | |
211 | { | |
1433eb99 | 212 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) |
9d858bb1 BP |
213 | return csrow; |
214 | else | |
215 | return csrow >> 1; | |
6775763a DT |
216 | } |
217 | ||
218 | /* return the 'base' address for the i'th CS entry of the 'dct' DRAM controller */ | |
219 | static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow) | |
220 | { | |
221 | if (dct == 0) | |
222 | return pvt->dcsb0[csrow]; | |
223 | else | |
224 | return pvt->dcsb1[csrow]; | |
225 | } | |
226 | ||
227 | /* | |
228 | * Return the 'mask' address for the i'th CS entry. This function is needed | |
229 | * because the number of DCSM registers differs between Rev E and prior and | |
230 | * Rev F and later. | |
231 | */ | |
232 | static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow) | |
233 | { | |
234 | if (dct == 0) | |
235 | return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)]; | |
236 | else | |
237 | return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)]; | |
238 | } | |
239 | ||
240 | ||
241 | /* | |
242 | * In *base and *limit, pass back the full 40-bit base and limit physical | |
243 | * addresses for the node given by node_id. This information is obtained from | |
244 | * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The | |
245 | * base and limit addresses are of type SysAddr, as defined at the start of | |
246 | * section 3.4.4 (p. 70). They are the lowest and highest physical addresses | |
247 | * in the address range they represent. | |
248 | */ | |
249 | static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id, | |
250 | u64 *base, u64 *limit) | |
251 | { | |
252 | *base = pvt->dram_base[node_id]; | |
253 | *limit = pvt->dram_limit[node_id]; | |
254 | } | |
255 | ||
256 | /* | |
257 | * Return 1 if the SysAddr given by sys_addr matches the base/limit associated | |
258 | * with node_id | |
259 | */ | |
260 | static int amd64_base_limit_match(struct amd64_pvt *pvt, | |
261 | u64 sys_addr, int node_id) | |
262 | { | |
263 | u64 base, limit, addr; | |
264 | ||
265 | amd64_get_base_and_limit(pvt, node_id, &base, &limit); | |
266 | ||
267 | /* The K8 treats this as a 40-bit value. However, bits 63-40 will be | |
268 | * all ones if the most significant implemented address bit is 1. | |
269 | * Here we discard bits 63-40. See section 3.4.2 of AMD publication | |
270 | * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1 | |
271 | * Application Programming. | |
272 | */ | |
273 | addr = sys_addr & 0x000000ffffffffffull; | |
274 | ||
275 | return (addr >= base) && (addr <= limit); | |
276 | } | |
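/*
 * Editor's worked example: a sign-extended SysAddr such as
 * 0xffffff80_12345678 becomes 0x00000080_12345678 after the AND above,
 * which is the 40-bit form compared against the node's [base, limit]
 * window.
 */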
277 | ||
278 | /* | |
279 | * Attempt to map a SysAddr to a node. On success, return a pointer to the | |
280 | * mem_ctl_info structure for the node that the SysAddr maps to. | |
281 | * | |
282 | * On failure, return NULL. | |
283 | */ | |
284 | static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, | |
285 | u64 sys_addr) | |
286 | { | |
287 | struct amd64_pvt *pvt; | |
288 | int node_id; | |
289 | u32 intlv_en, bits; | |
290 | ||
291 | /* | |
292 | * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section | |
293 | * 3.4.4.2) registers to map the SysAddr to a node ID. | |
294 | */ | |
295 | pvt = mci->pvt_info; | |
296 | ||
297 | /* | |
298 | * The value of this field should be the same for all DRAM Base | |
299 | * registers. Therefore we arbitrarily choose to read it from the | |
300 | * register for node 0. | |
301 | */ | |
302 | intlv_en = pvt->dram_IntlvEn[0]; | |
303 | ||
304 | if (intlv_en == 0) { | |
8edc5445 | 305 | for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) { |
6775763a | 306 | if (amd64_base_limit_match(pvt, sys_addr, node_id)) |
8edc5445 | 307 | goto found; |
6775763a | 308 | } |
8edc5445 | 309 | goto err_no_match; |
6775763a DT |
310 | } |
311 | ||
72f158fe BP |
312 | if (unlikely((intlv_en != 0x01) && |
313 | (intlv_en != 0x03) && | |
314 | (intlv_en != 0x07))) { | |
6775763a DT |
315 | amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from " |
316 | "IntlvEn field of DRAM Base Register for node 0: " | |
72f158fe | 317 | "this probably indicates a BIOS bug.\n", intlv_en); |
6775763a DT |
318 | return NULL; |
319 | } | |
320 | ||
321 | bits = (((u32) sys_addr) >> 12) & intlv_en; | |
322 | ||
323 | for (node_id = 0; ; ) { | |
8edc5445 | 324 | if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits) |
6775763a DT |
325 | break; /* intlv_sel field matches */ |
326 | ||
327 | if (++node_id >= DRAM_REG_COUNT) | |
328 | goto err_no_match; | |
329 | } | |
330 | ||
331 | /* sanity test for sys_addr */ | |
332 | if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { | |
333 | amd64_printk(KERN_WARNING, | |
8edc5445 BP |
334 | "%s(): sys_addr 0x%llx falls outside base/limit " |
335 | "address range for node %d with node interleaving " | |
336 | "enabled.\n", | |
337 | __func__, sys_addr, node_id); | |
6775763a DT |
338 | return NULL; |
339 | } | |
340 | ||
341 | found: | |
342 | return edac_mc_find(node_id); | |
343 | ||
344 | err_no_match: | |
345 | debugf2("sys_addr 0x%lx doesn't match any node\n", | |
346 | (unsigned long)sys_addr); | |
347 | ||
348 | return NULL; | |
349 | } | |
e2ce7255 DT |
350 | |
351 | /* | |
352 | * Extract the DRAM CS base address from selected csrow register. | |
353 | */ | |
354 | static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow) | |
355 | { | |
356 | return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) << | |
357 | pvt->dcs_shift; | |
358 | } | |
359 | ||
360 | /* | |
361 | * Extract the mask from the dcsm0[csrow] entry in a CPU revision-specific way. | |
362 | */ | |
363 | static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow) | |
364 | { | |
365 | u64 dcsm_bits, other_bits; | |
366 | u64 mask; | |
367 | ||
368 | /* Extract bits from DRAM CS Mask. */ | |
369 | dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask; | |
370 | ||
371 | other_bits = pvt->dcsm_mask; | |
372 | other_bits = ~(other_bits << pvt->dcs_shift); | |
373 | ||
374 | /* | |
375 | * The extracted bits from DCSM belong in the spaces represented by | |
376 | * the cleared bits in other_bits. | |
377 | */ | |
378 | mask = (dcsm_bits << pvt->dcs_shift) | other_bits; | |
379 | ||
380 | return mask; | |
381 | } | |
382 | ||
383 | /* | |
384 | * @input_addr is an InputAddr associated with the node given by mci. Return the | |
385 | * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr). | |
386 | */ | |
387 | static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) | |
388 | { | |
389 | struct amd64_pvt *pvt; | |
390 | int csrow; | |
391 | u64 base, mask; | |
392 | ||
393 | pvt = mci->pvt_info; | |
394 | ||
395 | /* | |
396 | * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS | |
397 | * base/mask register pair, test the condition shown near the start of | |
398 | * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E). | |
399 | */ | |
9d858bb1 | 400 | for (csrow = 0; csrow < pvt->cs_count; csrow++) { |
e2ce7255 DT |
401 | |
402 | /* This DRAM chip select is disabled on this node */ | |
403 | if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0) | |
404 | continue; | |
405 | ||
406 | base = base_from_dct_base(pvt, csrow); | |
407 | mask = ~mask_from_dct_mask(pvt, csrow); | |
408 | ||
409 | if ((input_addr & mask) == (base & mask)) { | |
410 | debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n", | |
411 | (unsigned long)input_addr, csrow, | |
412 | pvt->mc_node_id); | |
413 | ||
414 | return csrow; | |
415 | } | |
416 | } | |
417 | ||
418 | debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n", | |
419 | (unsigned long)input_addr, pvt->mc_node_id); | |
420 | ||
421 | return -1; | |
422 | } | |
423 | ||
424 | /* | |
425 | * Return the base value defined by the DRAM Base register for the node | |
426 | * represented by mci. This function returns the full 40-bit value despite the | |
427 | * fact that the register only stores bits 39-24 of the value. See section | |
428 | * 3.4.4.1 (BKDG #26094, K8, revA-E) | |
429 | */ | |
430 | static inline u64 get_dram_base(struct mem_ctl_info *mci) | |
431 | { | |
432 | struct amd64_pvt *pvt = mci->pvt_info; | |
433 | ||
434 | return pvt->dram_base[pvt->mc_node_id]; | |
435 | } | |
436 | ||
437 | /* | |
438 | * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094) | |
439 | * for the node represented by mci. Info is passed back in *hole_base, | |
440 | * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if | |
441 | * info is invalid. Info may be invalid for either of the following reasons: | |
442 | * | |
443 | * - The revision of the node is not E or greater. In this case, the DRAM Hole | |
444 | * Address Register does not exist. | |
445 | * | |
446 | * - The DramHoleValid bit is cleared in the DRAM Hole Address Register, | |
447 | * indicating that its contents are not valid. | |
448 | * | |
449 | * The values passed back in *hole_base, *hole_offset, and *hole_size are | |
450 | * complete 32-bit values despite the fact that the bitfields in the DHAR | |
451 | * only represent bits 31-24 of the base and offset values. | |
452 | */ | |
453 | int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, | |
454 | u64 *hole_offset, u64 *hole_size) | |
455 | { | |
456 | struct amd64_pvt *pvt = mci->pvt_info; | |
457 | u64 base; | |
458 | ||
459 | /* only revE and later have the DRAM Hole Address Register */ | |
1433eb99 | 460 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) { |
e2ce7255 DT |
461 | debugf1(" revision %d for node %d does not support DHAR\n", |
462 | pvt->ext_model, pvt->mc_node_id); | |
463 | return 1; | |
464 | } | |
465 | ||
466 | /* only valid for Fam10h */ | |
467 | if (boot_cpu_data.x86 == 0x10 && | |
468 | (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) { | |
469 | debugf1(" Dram Memory Hoisting is DISABLED on this system\n"); | |
470 | return 1; | |
471 | } | |
472 | ||
473 | if ((pvt->dhar & DHAR_VALID) == 0) { | |
474 | debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n", | |
475 | pvt->mc_node_id); | |
476 | return 1; | |
477 | } | |
478 | ||
479 | /* This node has Memory Hoisting */ | |
480 | ||
481 | /* +------------------+--------------------+--------------------+----- | |
482 | * | memory | DRAM hole | relocated | | |
483 | * | [0, (x - 1)] | [x, 0xffffffff] | addresses from | | |
484 | * | | | DRAM hole | | |
485 | * | | | [0x100000000, | | |
486 | * | | | (0x100000000+ | | |
487 | * | | | (0xffffffff-x))] | | |
488 | * +------------------+--------------------+--------------------+----- | |
489 | * | |
490 | * Above is a diagram of physical memory showing the DRAM hole and the | |
491 | * relocated addresses from the DRAM hole. As shown, the DRAM hole | |
492 | * starts at address x (the base address) and extends through address | |
493 | * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the | |
494 | * addresses in the hole so that they start at 0x100000000. | |
495 | */ | |
496 | ||
497 | base = dhar_base(pvt->dhar); | |
498 | ||
499 | *hole_base = base; | |
500 | *hole_size = (0x1ull << 32) - base; | |
501 | ||
502 | if (boot_cpu_data.x86 > 0xf) | |
503 | *hole_offset = f10_dhar_offset(pvt->dhar); | |
504 | else | |
505 | *hole_offset = k8_dhar_offset(pvt->dhar); | |
506 | ||
507 | debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", | |
508 | pvt->mc_node_id, (unsigned long)*hole_base, | |
509 | (unsigned long)*hole_offset, (unsigned long)*hole_size); | |
510 | ||
511 | return 0; | |
512 | } | |
513 | EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info); | |
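/*
 * Editor's worked example (made-up DHAR contents): with a hole base of
 * 0xc0000000 (3GB),
 *
 *	*hole_base = 0xc0000000
 *	*hole_size = 0x100000000 - 0xc0000000 = 0x40000000	(1GB)
 *
 * i.e. the DRAM shadowed by the [3GB, 4GB) hole is reachable again
 * starting at SysAddr 4GB, displaced by *hole_offset.
 */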
514 | ||
93c2df58 DT |
515 | /* |
516 | * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is | |
517 | * assumed that sys_addr maps to the node given by mci. | |
518 | * | |
519 | * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section | |
520 | * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a | |
521 | * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled, | |
522 | * then it is also involved in translating a SysAddr to a DramAddr. Sections | |
523 | * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting. | |
524 | * These parts of the documentation are unclear. I interpret them as follows: | |
525 | * | |
526 | * When node n receives a SysAddr, it processes the SysAddr as follows: | |
527 | * | |
528 | * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM | |
529 | * Limit registers for node n. If the SysAddr is not within the range | |
530 | * specified by the base and limit values, then node n ignores the SysAddr | |
531 | * (since it does not map to node n). Otherwise continue to step 2 below. | |
532 | * | |
533 | * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is | |
534 | * disabled so skip to step 3 below. Otherwise see if the SysAddr is within | |
535 | * the range of relocated addresses (starting at 0x100000000) from the DRAM | |
536 | * hole. If not, skip to step 3 below. Else get the value of the | |
537 | * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the | |
538 | * offset defined by this value from the SysAddr. | |
539 | * | |
540 | * 3. Obtain the base address for node n from the DRAMBase field of the DRAM | |
541 | * Base register for node n. To obtain the DramAddr, subtract the base | |
542 | * address from the SysAddr, as shown near the start of section 3.4.4 (p.70). | |
543 | */ | |
544 | static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) | |
545 | { | |
546 | u64 dram_base, hole_base, hole_offset, hole_size, dram_addr; | |
547 | int ret = 0; | |
548 | ||
549 | dram_base = get_dram_base(mci); | |
550 | ||
551 | ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, | |
552 | &hole_size); | |
553 | if (!ret) { | |
554 | if ((sys_addr >= (1ull << 32)) && | |
555 | (sys_addr < ((1ull << 32) + hole_size))) { | |
556 | /* use DHAR to translate SysAddr to DramAddr */ | |
557 | dram_addr = sys_addr - hole_offset; | |
558 | ||
559 | debugf2("using DHAR to translate SysAddr 0x%lx to " | |
560 | "DramAddr 0x%lx\n", | |
561 | (unsigned long)sys_addr, | |
562 | (unsigned long)dram_addr); | |
563 | ||
564 | return dram_addr; | |
565 | } | |
566 | } | |
567 | ||
568 | /* | |
569 | * Translate the SysAddr to a DramAddr as shown near the start of | |
570 | * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8 | |
571 | * only deals with 40-bit values. Therefore we discard bits 63-40 of | |
572 | * sys_addr below. If bit 39 of sys_addr is 1 then the bits we | |
573 | * discard are all 1s. Otherwise the bits we discard are all 0s. See | |
574 | * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture | |
575 | * Programmer's Manual Volume 1 Application Programming. | |
576 | */ | |
577 | dram_addr = (sys_addr & 0xffffffffffull) - dram_base; | |
578 | ||
579 | debugf2("using DRAM Base register to translate SysAddr 0x%lx to " | |
580 | "DramAddr 0x%lx\n", (unsigned long)sys_addr, | |
581 | (unsigned long)dram_addr); | |
582 | return dram_addr; | |
583 | } | |
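/*
 * Editor's sketch (standalone, with made-up register values): the two
 * translation paths above. A SysAddr inside the relocated-hole window
 * starting at 4GB gets hole_offset subtracted; anything else is truncated
 * to 40 bits and has the node's DRAM base subtracted.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t sys_to_dram(uint64_t sys_addr, uint64_t dram_base,
			    int hole_valid, uint64_t hole_offset,
			    uint64_t hole_size)
{
	if (hole_valid &&
	    sys_addr >= (1ULL << 32) &&
	    sys_addr <  (1ULL << 32) + hole_size)
		return sys_addr - hole_offset;		 /* DHAR path */

	return (sys_addr & 0xffffffffffULL) - dram_base; /* base/limit path */
}

int main(void)
{
	/* hypothetical node 0: base 0, 1GB hole, hole_offset 0x40000000 */
	uint64_t d = sys_to_dram(0x100000000ULL, 0, 1,
				 0x40000000ULL, 0x40000000ULL);

	printf("DramAddr = 0x%llx\n", (unsigned long long)d); /* 0xc0000000 */
	return 0;
}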
584 | ||
585 | /* | |
586 | * @intlv_en is the value of the IntlvEn field from a DRAM Base register | |
587 | * (section 3.4.4.1). Return the number of bits from a SysAddr that are used | |
588 | * for node interleaving. | |
589 | */ | |
590 | static int num_node_interleave_bits(unsigned intlv_en) | |
591 | { | |
592 | static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 }; | |
593 | int n; | |
594 | ||
595 | BUG_ON(intlv_en > 7); | |
596 | n = intlv_shift_table[intlv_en]; | |
597 | return n; | |
598 | } | |
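/*
 * Editor's note: IntlvEn masks SysAddr bits [14:12], so the only sane
 * encodings are 0 (no interleave), 1 (2 nodes, 1 bit), 3 (4 nodes,
 * 2 bits) and 7 (8 nodes, 3 bits); every other value maps to 0 in the
 * table above.
 */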
599 | ||
600 | /* Translate the DramAddr given by @dram_addr to an InputAddr. */ | |
601 | static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr) | |
602 | { | |
603 | struct amd64_pvt *pvt; | |
604 | int intlv_shift; | |
605 | u64 input_addr; | |
606 | ||
607 | pvt = mci->pvt_info; | |
608 | ||
609 | /* | |
610 | * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) | |
611 | * concerning translating a DramAddr to an InputAddr. | |
612 | */ | |
613 | intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); | |
614 | input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) + | |
615 | (dram_addr & 0xfff); | |
616 | ||
617 | debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", | |
618 | intlv_shift, (unsigned long)dram_addr, | |
619 | (unsigned long)input_addr); | |
620 | ||
621 | return input_addr; | |
622 | } | |
623 | ||
624 | /* | |
625 | * Translate the SysAddr represented by @sys_addr to an InputAddr. It is | |
626 | * assumed that @sys_addr maps to the node given by mci. | |
627 | */ | |
628 | static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr) | |
629 | { | |
630 | u64 input_addr; | |
631 | ||
632 | input_addr = | |
633 | dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr)); | |
634 | ||
635 | debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n", | |
636 | (unsigned long)sys_addr, (unsigned long)input_addr); | |
637 | ||
638 | return input_addr; | |
639 | } | |
640 | ||
641 | ||
642 | /* | |
643 | * @input_addr is an InputAddr associated with the node represented by mci. | |
644 | * Translate @input_addr to a DramAddr and return the result. | |
645 | */ | |
646 | static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) | |
647 | { | |
648 | struct amd64_pvt *pvt; | |
649 | int node_id, intlv_shift; | |
650 | u64 bits, dram_addr; | |
651 | u32 intlv_sel; | |
652 | ||
653 | /* | |
654 | * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) | |
655 | * shows how to translate a DramAddr to an InputAddr. Here we reverse | |
656 | * this procedure. When translating from a DramAddr to an InputAddr, the | |
657 | * bits used for node interleaving are discarded. Here we recover these | |
658 | * bits from the IntlvSel field of the DRAM Limit register (section | |
659 | * 3.4.4.2) for the node that input_addr is associated with. | |
660 | */ | |
661 | pvt = mci->pvt_info; | |
662 | node_id = pvt->mc_node_id; | |
663 | BUG_ON((node_id < 0) || (node_id > 7)); | |
664 | ||
665 | intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); | |
666 | ||
667 | if (intlv_shift == 0) { | |
668 | debugf1(" InputAddr 0x%lx translates to DramAddr of " | |
669 | "same value\n", (unsigned long)input_addr); | |
670 | ||
671 | return input_addr; | |
672 | } | |
673 | ||
674 | bits = ((input_addr & 0xffffff000ull) << intlv_shift) + | |
675 | (input_addr & 0xfff); | |
676 | ||
677 | intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1); | |
678 | dram_addr = bits + (intlv_sel << 12); | |
679 | ||
680 | debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx " | |
681 | "(%d node interleave bits)\n", (unsigned long)input_addr, | |
682 | (unsigned long)dram_addr, intlv_shift); | |
683 | ||
684 | return dram_addr; | |
685 | } | |
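/*
 * Editor's sketch (standalone): round trip through the forward and
 * reverse translations above for a hypothetical 2-node interleave
 * (intlv_shift == 1). Bits [39:12] shift around the 4KB page offset,
 * and intlv_sel restores the node-select bit that the forward
 * translation discarded.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t dram_to_input(uint64_t dram, int shift)
{
	return ((dram >> shift) & 0xffffff000ULL) + (dram & 0xfff);
}

static uint64_t input_to_dram(uint64_t input, int shift, uint32_t intlv_sel)
{
	uint64_t bits = ((input & 0xffffff000ULL) << shift) + (input & 0xfff);

	return bits + ((uint64_t)(intlv_sel & ((1 << shift) - 1)) << 12);
}

int main(void)
{
	uint64_t dram = 0x12345678ULL;		/* bit 12 selects the node */
	uint64_t in   = dram_to_input(dram, 1);
	uint32_t sel  = (dram >> 12) & 1;	/* what IntlvSel would hold */

	/* prints 0x12345678 -> 0x91a2678 -> 0x12345678 */
	printf("0x%llx -> 0x%llx -> 0x%llx\n",
	       (unsigned long long)dram, (unsigned long long)in,
	       (unsigned long long)input_to_dram(in, 1, sel));
	return 0;
}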
686 | ||
687 | /* | |
688 | * @dram_addr is a DramAddr that maps to the node represented by mci. Convert | |
689 | * @dram_addr to a SysAddr. | |
690 | */ | |
691 | static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) | |
692 | { | |
693 | struct amd64_pvt *pvt = mci->pvt_info; | |
694 | u64 hole_base, hole_offset, hole_size, base, limit, sys_addr; | |
695 | int ret = 0; | |
696 | ||
697 | ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, | |
698 | &hole_size); | |
699 | if (!ret) { | |
700 | if ((dram_addr >= hole_base) && | |
701 | (dram_addr < (hole_base + hole_size))) { | |
702 | sys_addr = dram_addr + hole_offset; | |
703 | ||
704 | debugf1("using DHAR to translate DramAddr 0x%lx to " | |
705 | "SysAddr 0x%lx\n", (unsigned long)dram_addr, | |
706 | (unsigned long)sys_addr); | |
707 | ||
708 | return sys_addr; | |
709 | } | |
710 | } | |
711 | ||
712 | amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit); | |
713 | sys_addr = dram_addr + base; | |
714 | ||
715 | /* | |
716 | * The sys_addr we have computed up to this point is a 40-bit value | |
717 | * because the k8 deals with 40-bit values. However, the value we are | |
718 | * supposed to return is a full 64-bit physical address. The AMD | |
719 | * x86-64 architecture specifies that the most significant implemented | |
720 | * address bit through bit 63 of a physical address must be either all | |
721 | * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a | |
722 | * 64-bit value below. See section 3.4.2 of AMD publication 24592: | |
723 | * AMD x86-64 Architecture Programmer's Manual Volume 1 Application | |
724 | * Programming. | |
725 | */ | |
726 | sys_addr |= ~((sys_addr & (1ull << 39)) - 1); | |
727 | ||
728 | debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n", | |
729 | pvt->mc_node_id, (unsigned long)dram_addr, | |
730 | (unsigned long)sys_addr); | |
731 | ||
732 | return sys_addr; | |
733 | } | |
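/*
 * Editor's worked example for the sign extension above: with bit 39 set,
 * say sys_addr == 0x80_12345678,
 *
 *	sys_addr & (1ull << 39)	== 0x80_00000000
 *	(that value) - 1	== 0x7f_ffffffff
 *	~(...)			== 0xffffff80_00000000
 *
 * so the OR yields 0xffffff80_12345678: bits 63-40 replicate bit 39.
 * With bit 39 clear the mask is ~(0 - 1) == 0 and sys_addr is unchanged.
 */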
734 | ||
735 | /* | |
736 | * @input_addr is an InputAddr associated with the node given by mci. Translate | |
737 | * @input_addr to a SysAddr. | |
738 | */ | |
739 | static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci, | |
740 | u64 input_addr) | |
741 | { | |
742 | return dram_addr_to_sys_addr(mci, | |
743 | input_addr_to_dram_addr(mci, input_addr)); | |
744 | } | |
745 | ||
746 | /* | |
747 | * Find the minimum and maximum InputAddr values that map to the given @csrow. | |
748 | * Pass back these values in *input_addr_min and *input_addr_max. | |
749 | */ | |
750 | static void find_csrow_limits(struct mem_ctl_info *mci, int csrow, | |
751 | u64 *input_addr_min, u64 *input_addr_max) | |
752 | { | |
753 | struct amd64_pvt *pvt; | |
754 | u64 base, mask; | |
755 | ||
756 | pvt = mci->pvt_info; | |
9d858bb1 | 757 | BUG_ON((csrow < 0) || (csrow >= pvt->cs_count)); |
93c2df58 DT |
758 | |
759 | base = base_from_dct_base(pvt, csrow); | |
760 | mask = mask_from_dct_mask(pvt, csrow); | |
761 | ||
762 | *input_addr_min = base & ~mask; | |
763 | *input_addr_max = base | mask | pvt->dcs_mask_notused; | |
764 | } | |
765 | ||
93c2df58 DT |
766 | /* Map the Error address to a PAGE and PAGE OFFSET. */ |
767 | static inline void error_address_to_page_and_offset(u64 error_address, | |
768 | u32 *page, u32 *offset) | |
769 | { | |
770 | *page = (u32) (error_address >> PAGE_SHIFT); | |
771 | *offset = ((u32) error_address) & ~PAGE_MASK; | |
772 | } | |
773 | ||
774 | /* | |
775 | * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address | |
776 | * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers | |
777 | * of a node that detected an ECC memory error. mci represents the node that | |
778 | * the error address maps to (possibly different from the node that detected | |
779 | * the error). Return the number of the csrow that sys_addr maps to, or -1 on | |
780 | * error. | |
781 | */ | |
782 | static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr) | |
783 | { | |
784 | int csrow; | |
785 | ||
786 | csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr)); | |
787 | ||
788 | if (csrow == -1) | |
789 | amd64_mc_printk(mci, KERN_ERR, | |
790 | "Failed to translate InputAddr to csrow for " | |
791 | "address 0x%lx\n", (unsigned long)sys_addr); | |
792 | return csrow; | |
793 | } | |
e2ce7255 | 794 | |
bfc04aec | 795 | static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16); |
2da11654 DT |
796 | |
797 | static void amd64_cpu_display_info(struct amd64_pvt *pvt) | |
798 | { | |
799 | if (boot_cpu_data.x86 == 0x11) | |
800 | edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n"); | |
801 | else if (boot_cpu_data.x86 == 0x10) | |
802 | edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n"); | |
803 | else if (boot_cpu_data.x86 == 0xf) | |
804 | edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n", | |
1433eb99 | 805 | (pvt->ext_model >= K8_REV_F) ? |
2da11654 DT |
806 | "Rev F or later" : "Rev E or earlier"); |
807 | else | |
808 | /* we'll hardly ever get here */ | |
809 | edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n"); | |
810 | } | |
811 | ||
812 | /* | |
813 | * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs | |
814 | * are ECC capable. | |
815 | */ | |
816 | static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) | |
817 | { | |
818 | int bit; | |
584fcff4 | 819 | enum edac_type edac_cap = EDAC_FLAG_NONE; |
2da11654 | 820 | |
1433eb99 | 821 | bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F) |
2da11654 DT |
822 | ? 19 |
823 | : 17; | |
824 | ||
584fcff4 | 825 | if (pvt->dclr0 & BIT(bit)) |
2da11654 DT |
826 | edac_cap = EDAC_FLAG_SECDED; |
827 | ||
828 | return edac_cap; | |
829 | } | |
830 | ||
831 | ||
8566c4df | 832 | static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt); |
2da11654 | 833 | |
68798e17 BP |
834 | static void amd64_dump_dramcfg_low(u32 dclr, int chan) |
835 | { | |
836 | debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); | |
837 | ||
838 | debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n", | |
839 | (dclr & BIT(16)) ? "un" : "", | |
840 | (dclr & BIT(19)) ? "yes" : "no"); | |
841 | ||
842 | debugf1(" PAR/ERR parity: %s\n", | |
843 | (dclr & BIT(8)) ? "enabled" : "disabled"); | |
844 | ||
845 | debugf1(" DCT 128bit mode width: %s\n", | |
846 | (dclr & BIT(11)) ? "128b" : "64b"); | |
847 | ||
848 | debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", | |
849 | (dclr & BIT(12)) ? "yes" : "no", | |
850 | (dclr & BIT(13)) ? "yes" : "no", | |
851 | (dclr & BIT(14)) ? "yes" : "no", | |
852 | (dclr & BIT(15)) ? "yes" : "no"); | |
853 | } | |
854 | ||
2da11654 DT |
855 | /* Display and decode various NB registers for debug purposes. */ |
856 | static void amd64_dump_misc_regs(struct amd64_pvt *pvt) | |
857 | { | |
858 | int ganged; | |
859 | ||
68798e17 BP |
860 | debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); |
861 | ||
862 | debugf1(" NB two channel DRAM capable: %s\n", | |
863 | (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no"); | |
2da11654 | 864 | |
68798e17 BP |
865 | debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n", |
866 | (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no", | |
867 | (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no"); | |
868 | ||
869 | amd64_dump_dramcfg_low(pvt->dclr0, 0); | |
2da11654 | 870 | |
8de1d91e | 871 | debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); |
2da11654 | 872 | |
8de1d91e BP |
873 | debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, " |
874 | "offset: 0x%08x\n", | |
875 | pvt->dhar, | |
876 | dhar_base(pvt->dhar), | |
877 | (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar) | |
878 | : f10_dhar_offset(pvt->dhar)); | |
2da11654 | 879 | |
8de1d91e BP |
880 | debugf1(" DramHoleValid: %s\n", |
881 | (pvt->dhar & DHAR_VALID) ? "yes" : "no"); | |
2da11654 | 882 | |
8de1d91e | 883 | /* everything below this point is Fam10h and above */ |
8566c4df BP |
884 | if (boot_cpu_data.x86 == 0xf) { |
885 | amd64_debug_display_dimm_sizes(0, pvt); | |
2da11654 | 886 | return; |
8566c4df | 887 | } |
2da11654 | 888 | |
8de1d91e | 889 | /* Only if NOT ganged does dclr1 have valid info */ |
68798e17 BP |
890 | if (!dct_ganging_enabled(pvt)) |
891 | amd64_dump_dramcfg_low(pvt->dclr1, 1); | |
2da11654 DT |
892 | |
893 | /* | |
894 | * Determine if ganged and then dump memory sizes for first controller, | |
895 | * and if NOT ganged dump info for 2nd controller. | |
896 | */ | |
897 | ganged = dct_ganging_enabled(pvt); | |
898 | ||
8566c4df | 899 | amd64_debug_display_dimm_sizes(0, pvt); |
2da11654 DT |
900 | |
901 | if (!ganged) | |
8566c4df | 902 | amd64_debug_display_dimm_sizes(1, pvt); |
2da11654 DT |
903 | } |
904 | ||
905 | /* Read in both of DBAM registers */ | |
906 | static void amd64_read_dbam_reg(struct amd64_pvt *pvt) | |
907 | { | |
6ba5dcdc | 908 | amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0); |
2da11654 | 909 | |
6ba5dcdc BP |
910 | if (boot_cpu_data.x86 >= 0x10) |
911 | amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1); | |
2da11654 DT |
912 | } |
913 | ||
94be4bff DT |
914 | /* |
915 | * NOTE: CPU Revision Dependent code: Rev E and Rev F | |
916 | * | |
917 | * Set the DCSB and DCSM mask values depending on the CPU revision value. Also | |
918 | * set the shift factor for the DCSB and DCSM values. | |
919 | * | |
920 | * ->dcs_mask_notused, RevE: | |
921 | * | |
922 | * To find the max InputAddr for the csrow, start with the base address and set | |
923 | * all bits that are "don't care" bits in the test at the start of section | |
924 | * 3.5.4 (p. 84). | |
925 | * | |
926 | * The "don't care" bits are all set bits in the mask and all bits in the gaps | |
927 | * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS | |
928 | * represents bits [24:20] and [12:0], which are all bits in the above-mentioned | |
929 | * gaps. | |
930 | * | |
931 | * ->dcs_mask_notused, RevF and later: | |
932 | * | |
933 | * To find the max InputAddr for the csrow, start with the base address and set | |
934 | * all bits that are "don't care" bits in the test at the start of NPT section | |
935 | * 4.5.4 (p. 87). | |
936 | * | |
937 | * The "don't care" bits are all set bits in the mask and all bits in the gaps | |
938 | * between bit ranges [36:27] and [21:13]. | |
939 | * | |
940 | * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0], | |
941 | * which are all bits in the above-mentioned gaps. | |
942 | */ | |
943 | static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) | |
944 | { | |
9d858bb1 | 945 | |
1433eb99 | 946 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { |
9d858bb1 BP |
947 | pvt->dcsb_base = REV_E_DCSB_BASE_BITS; |
948 | pvt->dcsm_mask = REV_E_DCSM_MASK_BITS; | |
949 | pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS; | |
950 | pvt->dcs_shift = REV_E_DCS_SHIFT; | |
951 | pvt->cs_count = 8; | |
952 | pvt->num_dcsm = 8; | |
953 | } else { | |
94be4bff DT |
954 | pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; |
955 | pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; | |
956 | pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS; | |
957 | pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT; | |
958 | ||
9d858bb1 BP |
959 | if (boot_cpu_data.x86 == 0x11) { |
960 | pvt->cs_count = 4; | |
961 | pvt->num_dcsm = 2; | |
962 | } else { | |
963 | pvt->cs_count = 8; | |
964 | pvt->num_dcsm = 4; | |
94be4bff | 965 | } |
94be4bff DT |
966 | } |
967 | } | |
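/*
 * Editor's note, deriving the RevE constant from the ranges quoted in the
 * comment above: "don't care" gaps [24:20] and [12:0] give
 *
 *	REV_E_DCS_NOTUSED_BITS == 0x01F00000 | 0x00001FFF == 0x01F01FFF
 *
 * which find_csrow_limits() ORs into input_addr_max so those bits are
 * treated as don't-cares when computing a csrow's highest InputAddr.
 */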
968 | ||
969 | /* | |
970 | * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers | |
971 | */ | |
972 | static void amd64_read_dct_base_mask(struct amd64_pvt *pvt) | |
973 | { | |
6ba5dcdc | 974 | int cs, reg; |
94be4bff DT |
975 | |
976 | amd64_set_dct_base_and_mask(pvt); | |
977 | ||
9d858bb1 | 978 | for (cs = 0; cs < pvt->cs_count; cs++) { |
94be4bff | 979 | reg = K8_DCSB0 + (cs * 4); |
6ba5dcdc | 980 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs])) |
94be4bff DT |
981 | debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n", |
982 | cs, pvt->dcsb0[cs], reg); | |
983 | ||
984 | /* If DCT are NOT ganged, then read in DCT1's base */ | |
985 | if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { | |
986 | reg = F10_DCSB1 + (cs * 4); | |
6ba5dcdc BP |
987 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, |
988 | &pvt->dcsb1[cs])) | |
94be4bff DT |
989 | debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", |
990 | cs, pvt->dcsb1[cs], reg); | |
991 | } else { | |
992 | pvt->dcsb1[cs] = 0; | |
993 | } | |
994 | } | |
995 | ||
996 | for (cs = 0; cs < pvt->num_dcsm; cs++) { | |
4afcd2dc | 997 | reg = K8_DCSM0 + (cs * 4); |
6ba5dcdc | 998 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs])) |
94be4bff DT |
999 | debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n", |
1000 | cs, pvt->dcsm0[cs], reg); | |
1001 | ||
1002 | /* If DCT are NOT ganged, then read in DCT1's mask */ | |
1003 | if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { | |
1004 | reg = F10_DCSM1 + (cs * 4); | |
6ba5dcdc BP |
1005 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, |
1006 | &pvt->dcsm1[cs])) | |
94be4bff DT |
1007 | debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", |
1008 | cs, pvt->dcsm1[cs], reg); | |
6ba5dcdc | 1009 | } else { |
94be4bff | 1010 | pvt->dcsm1[cs] = 0; |
6ba5dcdc | 1011 | } |
94be4bff DT |
1012 | } |
1013 | } | |
1014 | ||
1015 | static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt) | |
1016 | { | |
1017 | enum mem_type type; | |
1018 | ||
1433eb99 | 1019 | if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) { |
6b4c0bde BP |
1020 | if (pvt->dchr0 & DDR3_MODE) |
1021 | type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; | |
1022 | else | |
1023 | type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2; | |
94be4bff | 1024 | } else { |
94be4bff DT |
1025 | type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR; |
1026 | } | |
1027 | ||
239642fe | 1028 | debugf1(" Memory type is: %s\n", edac_mem_types[type]); |
94be4bff DT |
1029 | |
1030 | return type; | |
1031 | } | |
1032 | ||
ddff876d DT |
1033 | /* |
1034 | * Read the DRAM Configuration Low register. It differs between CG, D & E revs | |
1035 | * and the later RevF memory controllers (DDR vs DDR2) | |
1036 | * | |
1037 | * Return: | |
1038 | * number of memory channels in operation | |
1039 | * Pass back: | |
1040 | * contents of the DCL0_LOW register | |
1041 | */ | |
1042 | static int k8_early_channel_count(struct amd64_pvt *pvt) | |
1043 | { | |
1044 | int flag, err = 0; | |
1045 | ||
6ba5dcdc | 1046 | err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0); |
ddff876d DT |
1047 | if (err) |
1048 | return err; | |
1049 | ||
1433eb99 | 1050 | if ((boot_cpu_data.x86_model >> 4) >= K8_REV_F) { |
ddff876d DT |
1051 | /* RevF (NPT) and later */ |
1052 | flag = pvt->dclr0 & F10_WIDTH_128; | |
1053 | } else { | |
1054 | /* RevE and earlier */ | |
1055 | flag = pvt->dclr0 & REVE_WIDTH_128; | |
1056 | } | |
1057 | ||
1058 | /* not used */ | |
1059 | pvt->dclr1 = 0; | |
1060 | ||
1061 | return (flag) ? 2 : 1; | |
1062 | } | |
1063 | ||
1064 | /* extract the ERROR ADDRESS for the K8 CPUs */ | |
1065 | static u64 k8_get_error_address(struct mem_ctl_info *mci, | |
ef44cc4c | 1066 | struct err_regs *info) |
ddff876d DT |
1067 | { |
1068 | return (((u64) (info->nbeah & 0xff)) << 32) + | |
1069 | (info->nbeal & ~0x03); | |
1070 | } | |
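/*
 * Editor's note: NBEAH carries error address bits [39:32] and NBEAL bits
 * [31:2] on K8, so e.g. nbeah == 0x12, nbeal == 0x345677 assembles to
 * (0x12 << 32) + (0x345677 & ~0x03) == 0x12_00345674, a 40-bit,
 * 4-byte-aligned error address.
 */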
1071 | ||
1072 | /* | |
1073 | * Read the Base and Limit registers for K8 based Memory controllers; extract | |
1074 | * fields from the 'raw' reg into separate data fields | |
1075 | * | |
1076 | * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN | |
1077 | */ | |
1078 | static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | |
1079 | { | |
1080 | u32 low; | |
1081 | u32 off = dram << 3; /* 8 bytes between DRAM entries */ | |
ddff876d | 1082 | |
6ba5dcdc | 1083 | amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low); |
ddff876d DT |
1084 | |
1085 | /* Extract parts into separate data entries */ | |
4997811e | 1086 | pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8; |
ddff876d DT |
1087 | pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; |
1088 | pvt->dram_rw_en[dram] = (low & 0x3); | |
1089 | ||
6ba5dcdc | 1090 | amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low); |
ddff876d DT |
1091 | |
1092 | /* | |
1093 | * Extract parts into separate data entries. Limit is the HIGHEST memory | |
1094 | * location of the region, so lower 24 bits need to be all ones | |
1095 | */ | |
4997811e | 1096 | pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF; |
ddff876d DT |
1097 | pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7; |
1098 | pvt->dram_DstNode[dram] = (low & 0x7); | |
1099 | } | |
1100 | ||
1101 | static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |
ef44cc4c | 1102 | struct err_regs *info, |
44e9e2ee | 1103 | u64 sys_addr) |
ddff876d DT |
1104 | { |
1105 | struct mem_ctl_info *src_mci; | |
1106 | unsigned short syndrome; | |
1107 | int channel, csrow; | |
1108 | u32 page, offset; | |
1109 | ||
1110 | /* Extract the syndrome parts and form a 16-bit syndrome */ | |
b70ef010 BP |
1111 | syndrome = HIGH_SYNDROME(info->nbsl) << 8; |
1112 | syndrome |= LOW_SYNDROME(info->nbsh); | |
ddff876d DT |
1113 | |
1114 | /* CHIPKILL enabled */ | |
1115 | if (info->nbcfg & K8_NBCFG_CHIPKILL) { | |
bfc04aec | 1116 | channel = get_channel_from_ecc_syndrome(mci, syndrome); |
ddff876d DT |
1117 | if (channel < 0) { |
1118 | /* | |
1119 | * Syndrome didn't map, so we don't know which of the | |
1120 | * 2 DIMMs is in error. So we need to ID 'both' of them | |
1121 | * as suspect. | |
1122 | */ | |
1123 | amd64_mc_printk(mci, KERN_WARNING, | |
1124 | "unknown syndrome 0x%x - possible error " | |
1125 | "reporting race\n", syndrome); | |
1126 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); | |
1127 | return; | |
1128 | } | |
1129 | } else { | |
1130 | /* | |
1131 | * non-chipkill ecc mode | |
1132 | * | |
1133 | * The k8 documentation is unclear about how to determine the | |
1134 | * channel number when using non-chipkill memory. This method | |
1135 | * was obtained from email communication with someone at AMD. | |
1136 | * (Wish the email was placed in this comment - norsk) | |
1137 | */ | |
44e9e2ee | 1138 | channel = ((sys_addr & BIT(3)) != 0); |
ddff876d DT |
1139 | } |
1140 | ||
1141 | /* | |
1142 | * Find out which node the error address belongs to. This may be | |
1143 | * different from the node that detected the error. | |
1144 | */ | |
44e9e2ee | 1145 | src_mci = find_mc_by_sys_addr(mci, sys_addr); |
2cff18c2 | 1146 | if (!src_mci) { |
ddff876d DT |
1147 | amd64_mc_printk(mci, KERN_ERR, |
1148 | "failed to map error address 0x%lx to a node\n", | |
44e9e2ee | 1149 | (unsigned long)sys_addr); |
ddff876d DT |
1150 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
1151 | return; | |
1152 | } | |
1153 | ||
44e9e2ee BP |
1154 | /* Now map the sys_addr to a CSROW */ |
1155 | csrow = sys_addr_to_csrow(src_mci, sys_addr); | |
ddff876d DT |
1156 | if (csrow < 0) { |
1157 | edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR); | |
1158 | } else { | |
44e9e2ee | 1159 | error_address_to_page_and_offset(sys_addr, &page, &offset); |
ddff876d DT |
1160 | |
1161 | edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow, | |
1162 | channel, EDAC_MOD_STR); | |
1163 | } | |
1164 | } | |
1165 | ||
1433eb99 | 1166 | static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) |
ddff876d | 1167 | { |
1433eb99 | 1168 | int *dbam_map; |
ddff876d | 1169 | |
1433eb99 BP |
1170 | if (pvt->ext_model >= K8_REV_F) |
1171 | dbam_map = ddr2_dbam; | |
1172 | else if (pvt->ext_model >= K8_REV_D) | |
1173 | dbam_map = ddr2_dbam_revD; | |
1174 | else | |
1175 | dbam_map = ddr2_dbam_revCG; | |
ddff876d | 1176 | |
1433eb99 | 1177 | return dbam_map[cs_mode]; |
ddff876d DT |
1178 | } |
1179 | ||
1afd3c98 DT |
1180 | /* |
1181 | * Get the number of DCT channels in use. | |
1182 | * | |
1183 | * Return: | |
1184 | * number of Memory Channels in operation | |
1185 | * Pass back: | |
1186 | * contents of the DCL0_LOW register | |
1187 | */ | |
1188 | static int f10_early_channel_count(struct amd64_pvt *pvt) | |
1189 | { | |
57a30854 | 1190 | int dbams[] = { DBAM0, DBAM1 }; |
6ba5dcdc | 1191 | int i, j, channels = 0; |
1afd3c98 DT |
1192 | u32 dbam; |
1193 | ||
1afd3c98 DT |
1194 | /* If we are in 128 bit mode, then we are using 2 channels */ |
1195 | if (pvt->dclr0 & F10_WIDTH_128) { | |
1afd3c98 DT |
1196 | channels = 2; |
1197 | return channels; | |
1198 | } | |
1199 | ||
1200 | /* | |
d16149e8 BP |
1201 | * Need to check if in unganged mode: in that case, there are 2 channels, |
1202 | * but they are not in 128 bit mode and thus the above 'dclr0' status | |
1203 | * bit will be OFF. | |
1afd3c98 DT |
1204 | * |
1205 | * Need to check DCT0[0] and DCT1[0] to see if only one of them has | |
1206 | * their CSEnable bit on. If so, then SINGLE DIMM case. | |
1207 | */ | |
d16149e8 | 1208 | debugf0("Data width is not 128 bits - need more decoding\n"); |
ddff876d | 1209 | |
1afd3c98 DT |
1210 | /* |
1211 | * Check DRAM Bank Address Mapping values for each DIMM to see if there | |
1212 | * is more than just one DIMM present in unganged mode. Need to check | |
1213 | * both controllers since DIMMs can be placed in either one. | |
1214 | */ | |
57a30854 | 1215 | for (i = 0; i < ARRAY_SIZE(dbams); i++) { |
6ba5dcdc | 1216 | if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam)) |
1afd3c98 DT |
1217 | goto err_reg; |
1218 | ||
57a30854 WW |
1219 | for (j = 0; j < 4; j++) { |
1220 | if (DBAM_DIMM(j, dbam) > 0) { | |
1221 | channels++; | |
1222 | break; | |
1223 | } | |
1224 | } | |
1afd3c98 DT |
1225 | } |
1226 | ||
d16149e8 BP |
1227 | if (channels > 2) |
1228 | channels = 2; | |
1229 | ||
37da0450 | 1230 | debugf0("MCT channel count: %d\n", channels); |
1afd3c98 DT |
1231 | |
1232 | return channels; | |
1233 | ||
1234 | err_reg: | |
1235 | return -1; | |
1236 | ||
1237 | } | |
1238 | ||
1433eb99 | 1239 | static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) |
1afd3c98 | 1240 | { |
1433eb99 BP |
1241 | int *dbam_map; |
1242 | ||
1243 | if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) | |
1244 | dbam_map = ddr3_dbam; | |
1245 | else | |
1246 | dbam_map = ddr2_dbam; | |
1247 | ||
1248 | return dbam_map[cs_mode]; | |
1afd3c98 DT |
1249 | } |
1250 | ||
1251 | /* Enable extended configuration access via 0xCF8 feature */ | |
1252 | static void amd64_setup(struct amd64_pvt *pvt) | |
1253 | { | |
1254 | u32 reg; | |
1255 | ||
6ba5dcdc | 1256 | amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, ®); |
1afd3c98 DT |
1257 | |
1258 | pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG); | |
1259 | reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG; | |
1260 | pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg); | |
1261 | } | |
1262 | ||
1263 | /* Restore the extended configuration access via 0xCF8 feature */ | |
1264 | static void amd64_teardown(struct amd64_pvt *pvt) | |
1265 | { | |
1266 | u32 reg; | |
1267 | ||
6ba5dcdc | 1268 | amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, ®); |
1afd3c98 DT |
1269 | |
1270 | reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG; | |
1271 | if (pvt->flags.cf8_extcfg) | |
1272 | reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG; | |
1273 | pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg); | |
1274 | } | |
1275 | ||
1276 | static u64 f10_get_error_address(struct mem_ctl_info *mci, | |
ef44cc4c | 1277 | struct err_regs *info) |
1afd3c98 DT |
1278 | { |
1279 | return (((u64) (info->nbeah & 0xffff)) << 32) + | |
1280 | (info->nbeal & ~0x01); | |
1281 | } | |
1282 | ||
1283 | /* | |
1284 | * Read the Base and Limit registers for F10 based Memory controllers. Extract | |
1285 | * fields from the 'raw' reg into separate data fields. | |
1286 | * | |
1287 | * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN. | |
1288 | */ | |
1289 | static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | |
1290 | { | |
1291 | u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit; | |
1292 | ||
1293 | low_offset = K8_DRAM_BASE_LOW + (dram << 3); | |
1294 | high_offset = F10_DRAM_BASE_HIGH + (dram << 3); | |
1295 | ||
1296 | /* read the 'raw' DRAM BASE Address register */ | |
6ba5dcdc | 1297 | amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base); |
1afd3c98 DT |
1298 | |
1299 | /* Read from the ECS data register */ | |
6ba5dcdc | 1300 | amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base); |
1afd3c98 DT |
1301 | |
1302 | /* Extract parts into separate data entries */ | |
1303 | pvt->dram_rw_en[dram] = (low_base & 0x3); | |
1304 | ||
1305 | if (pvt->dram_rw_en[dram] == 0) | |
1306 | return; | |
1307 | ||
1308 | pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; | |
1309 | ||
66216a7a | 1310 | pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) | |
4997811e | 1311 | (((u64)low_base & 0xFFFF0000) << 8); |
1afd3c98 DT |
1312 | |
1313 | low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); | |
1314 | high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); | |
1315 | ||
1316 | /* read the 'raw' LIMIT registers */ | |
6ba5dcdc | 1317 | amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit); |
1afd3c98 DT |
1318 | |
1319 | /* Read from the ECS data register for the HIGH portion */ | |
6ba5dcdc | 1320 | amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit); |
1afd3c98 | 1321 | |
1afd3c98 DT |
1322 | pvt->dram_DstNode[dram] = (low_limit & 0x7); |
1323 | pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7; | |
1324 | ||
1325 | /* | |
1326 | * Extract address values and form a LIMIT address. Limit is the HIGHEST | |
1327 | * memory location of the region, so low 24 bits need to be all ones. | |
1328 | */ | |
66216a7a | 1329 | pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) | |
4997811e | 1330 | (((u64) low_limit & 0xFFFF0000) << 8) | |
66216a7a | 1331 | 0x00FFFFFF; |
1afd3c98 | 1332 | } |
6163b5d4 DT |
1333 | |
1334 | static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) | |
1335 | { | |
6163b5d4 | 1336 | |
6ba5dcdc BP |
1337 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW, |
1338 | &pvt->dram_ctl_select_low)) { | |
72381bd5 BP |
1339 | debugf0("F2x110 (DCTL Sel. Low): 0x%08x, " |
1340 | "High range addresses at: 0x%x\n", | |
1341 | pvt->dram_ctl_select_low, | |
1342 | dct_sel_baseaddr(pvt)); | |
1343 | ||
1344 | debugf0(" DCT mode: %s, All DCTs on: %s\n", | |
1345 | (dct_ganging_enabled(pvt) ? "ganged" : "unganged"), | |
1346 | (dct_dram_enabled(pvt) ? "yes" : "no")); | |
1347 | ||
1348 | if (!dct_ganging_enabled(pvt)) | |
1349 | debugf0(" Address range split per DCT: %s\n", | |
1350 | (dct_high_range_enabled(pvt) ? "yes" : "no")); | |
1351 | ||
1352 | debugf0(" DCT data interleave for ECC: %s, " | |
1353 | "DRAM cleared since last warm reset: %s\n", | |
1354 | (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), | |
1355 | (dct_memory_cleared(pvt) ? "yes" : "no")); | |
1356 | ||
1357 | debugf0(" DCT channel interleave: %s, " | |
1358 | "DCT interleave bits selector: 0x%x\n", | |
1359 | (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), | |
6163b5d4 DT |
1360 | dct_sel_interleave_addr(pvt)); |
1361 | } | |
1362 | ||
6ba5dcdc BP |
1363 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH, |
1364 | &pvt->dram_ctl_select_high); | |
6163b5d4 DT |
1365 | } |
1366 | ||
f71d0a05 DT |
1367 | /* |
1368 | * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory | |
1369 | * Interleaving Modes. | |
1370 | */ | |
6163b5d4 DT |
1371 | static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, |
1372 | int hi_range_sel, u32 intlv_en) | |
1373 | { | |
1374 | u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1; | |
1375 | ||
1376 | if (dct_ganging_enabled(pvt)) | |
1377 | cs = 0; | |
1378 | else if (hi_range_sel) | |
1379 | cs = dct_sel_high; | |
1380 | else if (dct_interleave_enabled(pvt)) { | |
f71d0a05 DT |
1381 | /* |
1382 | * see F2x110[DctSelIntLvAddr] - channel interleave mode | |
1383 | */ | |
6163b5d4 DT |
1384 | if (dct_sel_interleave_addr(pvt) == 0) |
1385 | cs = sys_addr >> 6 & 1; | |
1386 | else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) { | |
1387 | temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2; | |
1388 | ||
1389 | if (dct_sel_interleave_addr(pvt) & 1) | |
1390 | cs = (sys_addr >> 9 & 1) ^ temp; | |
1391 | else | |
1392 | cs = (sys_addr >> 6 & 1) ^ temp; | |
1393 | } else if (intlv_en & 4) | |
1394 | cs = sys_addr >> 15 & 1; | |
1395 | else if (intlv_en & 2) | |
1396 | cs = sys_addr >> 14 & 1; | |
1397 | else if (intlv_en & 1) | |
1398 | cs = sys_addr >> 13 & 1; | |
1399 | else | |
1400 | cs = sys_addr >> 12 & 1; | |
1401 | } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt)) | |
1402 | cs = ~dct_sel_high & 1; | |
1403 | else | |
1404 | cs = 0; | |
1405 | ||
1406 | return cs; | |
1407 | } | |
1408 | ||
1409 | static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en) | |
1410 | { | |
1411 | if (intlv_en == 1) | |
1412 | return 1; | |
1413 | else if (intlv_en == 3) | |
1414 | return 2; | |
1415 | else if (intlv_en == 7) | |
1416 | return 3; | |
1417 | ||
1418 | return 0; | |
1419 | } | |
1420 | ||
f71d0a05 DT |
1421 | /* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */ |
1422 | static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel, | |
6163b5d4 DT |
1423 | u32 dct_sel_base_addr, |
1424 | u64 dct_sel_base_off, | |
f71d0a05 | 1425 | u32 hole_valid, u32 hole_off, |
6163b5d4 DT |
1426 | u64 dram_base) |
1427 | { | |
1428 | u64 chan_off; | |
1429 | ||
1430 | if (hi_range_sel) { | |
1431 | if (!(dct_sel_base_addr & 0xFFFFF800) && | |
f71d0a05 | 1432 | hole_valid && (sys_addr >= 0x100000000ULL)) |
6163b5d4 DT |
1433 | chan_off = hole_off << 16; |
1434 | else | |
1435 | chan_off = dct_sel_base_off; | |
1436 | } else { | |
f71d0a05 | 1437 | if (hole_valid && (sys_addr >= 0x100000000ULL)) |
6163b5d4 DT |
1438 | chan_off = hole_off << 16; |
1439 | else | |
1440 | chan_off = dram_base & 0xFFFFF8000000ULL; | |
1441 | } | |
1442 | ||
1443 | return (sys_addr & 0x0000FFFFFFFFFFC0ULL) - | |
1444 | (chan_off & 0x0000FFFFFF800000ULL); | |
1445 | } | |
1446 | ||
1447 | /* Hack for the time being - Can we get this from BIOS?? */ | |
1448 | #define CH0SPARE_RANK 0 | |
1449 | #define CH1SPARE_RANK 1 | |
1450 | ||
1451 | /* | |
1452 | * checks if the csrow passed in is marked as SPARED, if so returns the new | |
1453 | * spare row | |
1454 | */ | |
1455 | static inline int f10_process_possible_spare(int csrow, | |
1456 | u32 cs, struct amd64_pvt *pvt) | |
1457 | { | |
1458 | u32 swap_done; | |
1459 | u32 bad_dram_cs; | |
1460 | ||
1461 | /* Depending on channel, isolate respective SPARING info */ | |
1462 | if (cs) { | |
1463 | swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare); | |
1464 | bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare); | |
1465 | if (swap_done && (csrow == bad_dram_cs)) | |
1466 | csrow = CH1SPARE_RANK; | |
1467 | } else { | |
1468 | swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare); | |
1469 | bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare); | |
1470 | if (swap_done && (csrow == bad_dram_cs)) | |
1471 | csrow = CH0SPARE_RANK; | |
1472 | } | |
1473 | return csrow; | |
1474 | } | |
1475 | ||
1476 | /* | |
1477 | * Iterate over the DRAM DCT "base" and "mask" registers looking for a | |
1478 | * SystemAddr match on the specified 'ChannelSelect' and 'NodeID' | |
1479 | * | |
1480 | * Return: | |
1481 | * -EINVAL: NOT FOUND | |
1482 | * 0..csrow = Chip-Select Row | |
1483 | */ | |
1484 | static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) | |
1485 | { | |
1486 | struct mem_ctl_info *mci; | |
1487 | struct amd64_pvt *pvt; | |
1488 | u32 cs_base, cs_mask; | |
1489 | int cs_found = -EINVAL; | |
1490 | int csrow; | |
1491 | ||
1492 | mci = mci_lookup[nid]; | |
1493 | if (!mci) | |
1494 | return cs_found; | |
1495 | ||
1496 | pvt = mci->pvt_info; | |
1497 | ||
1498 | debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); | |
1499 | ||
9d858bb1 | 1500 | for (csrow = 0; csrow < pvt->cs_count; csrow++) { |
6163b5d4 DT |
1501 | |
1502 | cs_base = amd64_get_dct_base(pvt, cs, csrow); | |
1503 | if (!(cs_base & K8_DCSB_CS_ENABLE)) | |
1504 | continue; | |
1505 | ||
1506 | /* | |
1507 | * We have an ENABLED CSROW; isolate just the MASK bits of the | |
1508 | * target: [28:19] and [13:5], which map to [36:27] and [21:13] | |
1509 | * of the actual address. | |
1510 | */ | |
1511 | cs_base &= REV_F_F1Xh_DCSB_BASE_BITS; | |
1512 | ||
1513 | /* | |
1514 | * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and | |
1515 | * [4:0] to become ON. Then mask off bits [28:0] ([36:8]) | |
1516 | */ | |
1517 | cs_mask = amd64_get_dct_mask(pvt, cs, csrow); | |
1518 | ||
1519 | debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n", | |
1520 | csrow, cs_base, cs_mask); | |
1521 | ||
1522 | cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF; | |
1523 | ||
1524 | debugf1(" Final CSMask=0x%x\n", cs_mask); | |
1525 | debugf1(" (InputAddr & ~CSMask)=0x%x " | |
1526 | "(CSBase & ~CSMask)=0x%x\n", | |
1527 | (in_addr & ~cs_mask), (cs_base & ~cs_mask)); | |
1528 | ||
1529 | if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) { | |
1530 | cs_found = f10_process_possible_spare(csrow, cs, pvt); | |
1531 | ||
1532 | debugf1(" MATCH csrow=%d\n", cs_found); | |
1533 | break; | |
1534 | } | |
1535 | } | |
1536 | return cs_found; | |
1537 | } | |
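/*
 * A minimal sketch of the match predicate used above (illustrative, not
 * part of the driver): a chip select claims an InputAddr when the
 * address bits outside the (reserved-bit-extended) mask agree with the
 * base register.
 */
static inline int cs_matches(u32 in_addr, u32 cs_base, u32 cs_mask)
{
	return (in_addr & ~cs_mask) == (cs_base & ~cs_mask);
}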
1538 | ||
f71d0a05 DT |
1539 | /* For a given @dram_range, check if @sys_addr falls within it. */ |
1540 | static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, | |
1541 | u64 sys_addr, int *nid, int *chan_sel) | |
1542 | { | |
1543 | int node_id, cs_found = -EINVAL, high_range = 0; | |
1544 | u32 intlv_en, intlv_sel, intlv_shift, hole_off; | |
1545 | u32 hole_valid, tmp, dct_sel_base, channel; | |
1546 | u64 dram_base, chan_addr, dct_sel_base_off; | |
1547 | ||
1548 | dram_base = pvt->dram_base[dram_range]; | |
1549 | intlv_en = pvt->dram_IntlvEn[dram_range]; | |
1550 | ||
1551 | node_id = pvt->dram_DstNode[dram_range]; | |
1552 | intlv_sel = pvt->dram_IntlvSel[dram_range]; | |
1553 | ||
1554 | debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n", | |
1555 | dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]); | |
1556 | ||
1557 | /* | |
1558 | * This assumes that one node's DHAR is the same as all the other | |
1559 | * nodes' DHAR. | |
1560 | */ | |
1561 | hole_off = (pvt->dhar & 0x0000FF80); | |
1562 | hole_valid = (pvt->dhar & 0x1); | |
1563 | dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16; | |
1564 | ||
1565 | debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n", | |
1566 | hole_off, hole_valid, intlv_sel); | |
1567 | ||
1568 | if (intlv_en && | |
1569 | (intlv_sel != ((sys_addr >> 12) & intlv_en))) | |
1570 | return -EINVAL; | |
1571 | ||
1572 | dct_sel_base = dct_sel_baseaddr(pvt); | |
1573 | ||
1574 | /* | |
1575 | * check whether addresses >= DctSelBaseAddr[47:27] are to be used to | |
1576 | * select between DCT0 and DCT1. | |
1577 | */ | |
1578 | if (dct_high_range_enabled(pvt) && | |
1579 | !dct_ganging_enabled(pvt) && | |
1580 | ((sys_addr >> 27) >= (dct_sel_base >> 11))) | |
1581 | high_range = 1; | |
1582 | ||
1583 | channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en); | |
1584 | ||
1585 | chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base, | |
1586 | dct_sel_base_off, hole_valid, | |
1587 | hole_off, dram_base); | |
1588 | ||
1589 | intlv_shift = f10_map_intlv_en_to_shift(intlv_en); | |
1590 | ||
1591 | /* remove Node ID (in case of memory interleaving) */ | |
1592 | tmp = chan_addr & 0xFC0; | |
1593 | ||
1594 | chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp; | |
1595 | ||
1596 | /* remove channel interleave and hash */ | |
1597 | if (dct_interleave_enabled(pvt) && | |
1598 | !dct_high_range_enabled(pvt) && | |
1599 | !dct_ganging_enabled(pvt)) { | |
1600 | if (dct_sel_interleave_addr(pvt) != 1) | |
1601 | chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL; | |
1602 | else { | |
1603 | tmp = chan_addr & 0xFC0; | |
1604 | chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1) | |
1605 | | tmp; | |
1606 | } | |
1607 | } | |
1608 | ||
1609 | debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n", | |
1610 | chan_addr, (u32)(chan_addr >> 8)); | |
1611 | ||
1612 | cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel); | |
1613 | ||
1614 | if (cs_found >= 0) { | |
1615 | *nid = node_id; | |
1616 | *chan_sel = channel; | |
1617 | } | |
1618 | return cs_found; | |
1619 | } | |
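/*
 * Worked example for the node-interleave removal above (illustrative):
 * with two-node interleaving (intlv_en == 1, hence intlv_shift == 1) and
 * chan_addr == 0x12345FC0, bits [11:6] are preserved while the bits
 * above them slide down by one:
 *   tmp       = 0x12345FC0 & 0xFC0                             = 0xFC0
 *   chan_addr = ((0x12345FC0 >> 1) & 0xFFFFFFFFF000ULL) | tmp  = 0x091A2FC0
 */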
1620 | ||
1621 | static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, | |
1622 | int *node, int *chan_sel) | |
1623 | { | |
1624 | int dram_range, cs_found = -EINVAL; | |
1625 | u64 dram_base, dram_limit; | |
1626 | ||
1627 | for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) { | |
1628 | ||
1629 | if (!pvt->dram_rw_en[dram_range]) | |
1630 | continue; | |
1631 | ||
1632 | dram_base = pvt->dram_base[dram_range]; | |
1633 | dram_limit = pvt->dram_limit[dram_range]; | |
1634 | ||
1635 | if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) { | |
1636 | ||
1637 | cs_found = f10_match_to_this_node(pvt, dram_range, | |
1638 | sys_addr, node, | |
1639 | chan_sel); | |
1640 | if (cs_found >= 0) | |
1641 | break; | |
1642 | } | |
1643 | } | |
1644 | return cs_found; | |
1645 | } | |
1646 | ||
1647 | /* | |
bdc30a0c BP |
1648 | * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps |
1649 | * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW). | |
f71d0a05 | 1650 | * |
bdc30a0c BP |
1651 | * The @sys_addr is usually an error address received from the hardware |
1652 | * (MCX_ADDR). | |
f71d0a05 DT |
1653 | */ |
1654 | static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |
ef44cc4c | 1655 | struct err_regs *info, |
f71d0a05 DT |
1656 | u64 sys_addr) |
1657 | { | |
1658 | struct amd64_pvt *pvt = mci->pvt_info; | |
1659 | u32 page, offset; | |
1660 | unsigned short syndrome; | |
1661 | int nid, csrow, chan = 0; | |
1662 | ||
1663 | csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); | |
1664 | ||
bdc30a0c BP |
1665 | if (csrow < 0) { |
1666 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); | |
1667 | return; | |
1668 | } | |
1669 | ||
1670 | error_address_to_page_and_offset(sys_addr, &page, &offset); | |
f71d0a05 | 1671 | |
bdc30a0c BP |
1672 | syndrome = HIGH_SYNDROME(info->nbsl) << 8; |
1673 | syndrome |= LOW_SYNDROME(info->nbsh); | |
1674 | ||
1675 | /* | |
1676 | * We need the syndromes for channel detection only when we're | |
1677 | * ganged. Otherwise @chan should already contain the channel at | |
1678 | * this point. | |
1679 | */ | |
1680 | if (dct_ganging_enabled(pvt) && pvt->nbcfg & K8_NBCFG_CHIPKILL) | |
1681 | chan = get_channel_from_ecc_syndrome(mci, syndrome); | |
f71d0a05 | 1682 | |
bdc30a0c BP |
1683 | if (chan >= 0) |
1684 | edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan, | |
1685 | EDAC_MOD_STR); | |
1686 | else | |
f71d0a05 | 1687 | /* |
bdc30a0c | 1688 | * Channel unknown, report all channels on this CSROW as failed. |
f71d0a05 | 1689 | */ |
bdc30a0c | 1690 | for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++) |
f71d0a05 | 1691 | edac_mc_handle_ce(mci, page, offset, syndrome, |
bdc30a0c | 1692 | csrow, chan, EDAC_MOD_STR); |
f71d0a05 DT |
1693 | } |
1694 | ||
f71d0a05 | 1695 | /* |
8566c4df | 1696 | * debug routine to display the memory sizes of all logical DIMMs and their |
f71d0a05 DT |
1697 | * CSROWs as well |
1698 | */ | |
8566c4df | 1699 | static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) |
f71d0a05 DT |
1700 | { |
1701 | int dimm, size0, size1; | |
1702 | u32 dbam; | |
1703 | u32 *dcsb; | |
1704 | ||
8566c4df BP |
1705 | if (boot_cpu_data.x86 == 0xf) { |
1706 | /* K8 families < revF not supported yet */ | |
1433eb99 | 1707 | if (pvt->ext_model < K8_REV_F) |
8566c4df BP |
1708 | return; |
1709 | else | |
1710 | WARN_ON(ctrl != 0); | |
1711 | } | |
1712 | ||
1713 | debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", | |
1714 | ctrl, ctrl ? pvt->dbam1 : pvt->dbam0); | |
f71d0a05 DT |
1715 | |
1716 | dbam = ctrl ? pvt->dbam1 : pvt->dbam0; | |
1717 | dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0; | |
1718 | ||
8566c4df BP |
1719 | edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); |
1720 | ||
f71d0a05 DT |
1721 | /* Dump memory sizes for DIMM and its CSROWs */ |
1722 | for (dimm = 0; dimm < 4; dimm++) { | |
1723 | ||
1724 | size0 = 0; | |
1725 | if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE) | |
1433eb99 | 1726 | size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); |
f71d0a05 DT |
1727 | |
1728 | size1 = 0; | |
1729 | if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE) | |
1433eb99 | 1730 | size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); |
f71d0a05 | 1731 | |
8566c4df BP |
1732 | edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n", |
1733 | dimm * 2, size0, dimm * 2 + 1, size1); | |
f71d0a05 DT |
1734 | } |
1735 | } | |
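/*
 * Sample output of the routine above (illustrative values only):
 *   EDAC MC: DCT0 chip selects:
 *   EDAC MC:  0:  2048MB 1:  2048MB
 *   EDAC MC:  2:  2048MB 3:  2048MB
 *   EDAC MC:  4:     0MB 5:     0MB
 *   EDAC MC:  6:     0MB 7:     0MB
 */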
1736 | ||
4d37607a DT |
1737 | /* |
1738 | * There currently are three types of MC devices for AMD Athlon/Opterons | |
1739 | * (as per PCI DEVICE_IDs): | |
1740 | * | |
1741 | * Family K8: That is the Athlon64 and Opteron CPUs. They all have the same PCI | |
1742 | * DEVICE ID, even though there are differences between the different revisions | |
1743 | * (CG,D,E,F). | |
1744 | * | |
1745 | * Family F10h and F11h. | |
1746 | * | |
1747 | */ | |
1748 | static struct amd64_family_type amd64_family_types[] = { | |
1749 | [K8_CPUS] = { | |
1750 | .ctl_name = "RevF", | |
1751 | .addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, | |
1752 | .misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC, | |
1753 | .ops = { | |
1433eb99 BP |
1754 | .early_channel_count = k8_early_channel_count, |
1755 | .get_error_address = k8_get_error_address, | |
1756 | .read_dram_base_limit = k8_read_dram_base_limit, | |
1757 | .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, | |
1758 | .dbam_to_cs = k8_dbam_to_chip_select, | |
4d37607a DT |
1759 | } |
1760 | }, | |
1761 | [F10_CPUS] = { | |
1762 | .ctl_name = "Family 10h", | |
1763 | .addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP, | |
1764 | .misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC, | |
1765 | .ops = { | |
1433eb99 BP |
1766 | .early_channel_count = f10_early_channel_count, |
1767 | .get_error_address = f10_get_error_address, | |
1768 | .read_dram_base_limit = f10_read_dram_base_limit, | |
1769 | .read_dram_ctl_register = f10_read_dram_ctl_register, | |
1770 | .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow, | |
1771 | .dbam_to_cs = f10_dbam_to_chip_select, | |
4d37607a DT |
1772 | } |
1773 | }, | |
1774 | [F11_CPUS] = { | |
1775 | .ctl_name = "Family 11h", | |
1776 | .addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP, | |
1777 | .misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC, | |
1778 | .ops = { | |
1433eb99 BP |
1779 | .early_channel_count = f10_early_channel_count, |
1780 | .get_error_address = f10_get_error_address, | |
1781 | .read_dram_base_limit = f10_read_dram_base_limit, | |
1782 | .read_dram_ctl_register = f10_read_dram_ctl_register, | |
1783 | .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow, | |
1784 | .dbam_to_cs = f10_dbam_to_chip_select, | |
4d37607a DT |
1785 | } |
1786 | }, | |
1787 | }; | |
1788 | ||
1789 | static struct pci_dev *pci_get_related_function(unsigned int vendor, | |
1790 | unsigned int device, | |
1791 | struct pci_dev *related) | |
1792 | { | |
1793 | struct pci_dev *dev = NULL; | |
1794 | ||
1795 | dev = pci_get_device(vendor, device, dev); | |
1796 | while (dev) { | |
1797 | if ((dev->bus->number == related->bus->number) && | |
1798 | (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn))) | |
1799 | break; | |
1800 | dev = pci_get_device(vendor, device, dev); | |
1801 | } | |
1802 | ||
1803 | return dev; | |
1804 | } | |
1805 | ||
b1289d6f | 1806 | /* |
bfc04aec BP |
1807 | * These are tables of eigenvectors (one per line) which can be used for the |
1808 | * construction of the syndrome tables. The modified syndrome search algorithm | |
1809 | * uses those to find the symbol in error and thus the DIMM. | |
b1289d6f | 1810 | * |
bfc04aec | 1811 | * Algorithm courtesy of Ross LaFetra from AMD. |
b1289d6f | 1812 | */ |
bfc04aec BP |
1813 | static u16 x4_vectors[] = { |
1814 | 0x2f57, 0x1afe, 0x66cc, 0xdd88, | |
1815 | 0x11eb, 0x3396, 0x7f4c, 0xeac8, | |
1816 | 0x0001, 0x0002, 0x0004, 0x0008, | |
1817 | 0x1013, 0x3032, 0x4044, 0x8088, | |
1818 | 0x106b, 0x30d6, 0x70fc, 0xe0a8, | |
1819 | 0x4857, 0xc4fe, 0x13cc, 0x3288, | |
1820 | 0x1ac5, 0x2f4a, 0x5394, 0xa1e8, | |
1821 | 0x1f39, 0x251e, 0xbd6c, 0x6bd8, | |
1822 | 0x15c1, 0x2a42, 0x89ac, 0x4758, | |
1823 | 0x2b03, 0x1602, 0x4f0c, 0xca08, | |
1824 | 0x1f07, 0x3a0e, 0x6b04, 0xbd08, | |
1825 | 0x8ba7, 0x465e, 0x244c, 0x1cc8, | |
1826 | 0x2b87, 0x164e, 0x642c, 0xdc18, | |
1827 | 0x40b9, 0x80de, 0x1094, 0x20e8, | |
1828 | 0x27db, 0x1eb6, 0x9dac, 0x7b58, | |
1829 | 0x11c1, 0x2242, 0x84ac, 0x4c58, | |
1830 | 0x1be5, 0x2d7a, 0x5e34, 0xa718, | |
1831 | 0x4b39, 0x8d1e, 0x14b4, 0x28d8, | |
1832 | 0x4c97, 0xc87e, 0x11fc, 0x33a8, | |
1833 | 0x8e97, 0x497e, 0x2ffc, 0x1aa8, | |
1834 | 0x16b3, 0x3d62, 0x4f34, 0x8518, | |
1835 | 0x1e2f, 0x391a, 0x5cac, 0xf858, | |
1836 | 0x1d9f, 0x3b7a, 0x572c, 0xfe18, | |
1837 | 0x15f5, 0x2a5a, 0x5264, 0xa3b8, | |
1838 | 0x1dbb, 0x3b66, 0x715c, 0xe3f8, | |
1839 | 0x4397, 0xc27e, 0x17fc, 0x3ea8, | |
1840 | 0x1617, 0x3d3e, 0x6464, 0xb8b8, | |
1841 | 0x23ff, 0x12aa, 0xab6c, 0x56d8, | |
1842 | 0x2dfb, 0x1ba6, 0x913c, 0x7328, | |
1843 | 0x185d, 0x2ca6, 0x7914, 0x9e28, | |
1844 | 0x171b, 0x3e36, 0x7d7c, 0xebe8, | |
1845 | 0x4199, 0x82ee, 0x19f4, 0x2e58, | |
1846 | 0x4807, 0xc40e, 0x130c, 0x3208, | |
1847 | 0x1905, 0x2e0a, 0x5804, 0xac08, | |
1848 | 0x213f, 0x132a, 0xadfc, 0x5ba8, | |
1849 | 0x19a9, 0x2efe, 0xb5cc, 0x6f88, | |
b1289d6f DT |
1850 | }; |
1851 | ||
bfc04aec BP |
1852 | static u16 x8_vectors[] = { |
1853 | 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480, | |
1854 | 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80, | |
1855 | 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80, | |
1856 | 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80, | |
1857 | 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780, | |
1858 | 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080, | |
1859 | 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080, | |
1860 | 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080, | |
1861 | 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80, | |
1862 | 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580, | |
1863 | 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880, | |
1864 | 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280, | |
1865 | 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180, | |
1866 | 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580, | |
1867 | 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280, | |
1868 | 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180, | |
1869 | 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080, | |
1870 | 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, | |
1871 | 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, | |
1872 | }; | |
1873 | ||
1874 | static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs, | |
1875 | int v_dim) | |
b1289d6f | 1876 | { |
bfc04aec BP |
1877 | unsigned int i, err_sym; |
1878 | ||
1879 | for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) { | |
1880 | u16 s = syndrome; | |
1881 | int v_idx = err_sym * v_dim; | |
1882 | int v_end = (err_sym + 1) * v_dim; | |
1883 | ||
1884 | /* walk over all 16 bits of the syndrome */ | |
1885 | for (i = 1; i < (1U << 16); i <<= 1) { | |
1886 | ||
1887 | /* if bit is set in that eigenvector... */ | |
1888 | if (v_idx < v_end && vectors[v_idx] & i) { | |
1889 | u16 ev_comp = vectors[v_idx++]; | |
1890 | ||
1891 | /* ... and bit set in the modified syndrome, */ | |
1892 | if (s & i) { | |
1893 | /* remove it. */ | |
1894 | s ^= ev_comp; | |
4d37607a | 1895 | |
bfc04aec BP |
1896 | if (!s) |
1897 | return err_sym; | |
1898 | } | |
b1289d6f | 1899 | |
bfc04aec BP |
1900 | } else if (s & i) |
1901 | /* can't get to zero, move to next symbol */ | |
1902 | break; | |
1903 | } | |
b1289d6f DT |
1904 | } |
1905 | ||
1906 | debugf0("syndrome(%x) not found\n", syndrome); | |
1907 | return -1; | |
1908 | } | |
d27bf6fa | 1909 | |
bfc04aec BP |
1910 | static int map_err_sym_to_channel(int err_sym, int sym_size) |
1911 | { | |
1912 | if (sym_size == 4) | |
1913 | switch (err_sym) { | |
1914 | case 0x20: | |
1915 | case 0x21: | |
1916 | return 0; | |
1918 | case 0x22: | |
1919 | case 0x23: | |
1920 | return 1; | |
1922 | default: | |
1923 | return err_sym >> 4; | |
1925 | } | |
1926 | /* x8 symbols */ | |
1927 | else | |
1928 | switch (err_sym) { | |
1929 | /* imaginary bits not in a DIMM */ | |
1930 | case 0x10: | |
1931 | WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n", | |
1932 | err_sym); | |
1933 | return -1; | |
1935 | ||
1936 | case 0x11: | |
1937 | return 0; | |
1939 | case 0x12: | |
1940 | return 1; | |
1942 | default: | |
1943 | return err_sym >> 3; | |
1945 | } | |
1946 | return -1; | |
1947 | } | |
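/*
 * A minimal usage sketch combining the two helpers above (illustrative,
 * not part of the driver): walk a raw 16-bit syndrome down to the
 * failing channel, assuming x4 ECC symbols.
 */
static int channel_from_syndrome_x4(u16 syndrome)
{
	int err_sym = decode_syndrome(syndrome, x4_vectors,
				      ARRAY_SIZE(x4_vectors), 4);

	return (err_sym < 0) ? -1 : map_err_sym_to_channel(err_sym, 4);
}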
1948 | ||
1949 | static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome) | |
1950 | { | |
1951 | struct amd64_pvt *pvt = mci->pvt_info; | |
1952 | u32 value = 0; | |
1953 | int err_sym = 0; | |
1954 | ||
1955 | amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value); | |
1956 | ||
1957 | /* F3x180[EccSymbolSize]=1, x8 symbols */ | |
1958 | if (boot_cpu_data.x86 == 0x10 && | |
1959 | boot_cpu_data.x86_model > 7 && | |
1960 | value & BIT(25)) { | |
1961 | err_sym = decode_syndrome(syndrome, x8_vectors, | |
1962 | ARRAY_SIZE(x8_vectors), 8); | |
1963 | return map_err_sym_to_channel(err_sym, 8); | |
1964 | } else { | |
1965 | err_sym = decode_syndrome(syndrome, x4_vectors, | |
1966 | ARRAY_SIZE(x4_vectors), 4); | |
1967 | return map_err_sym_to_channel(err_sym, 4); | |
1968 | } | |
1969 | } | |
1970 | ||
d27bf6fa DT |
1971 | /* |
1972 | * Check for valid error in the NB Status High register. If so, proceed to read | |
1973 | * NB Status Low, NB Address Low and NB Address High registers and store data | |
1974 | * into error structure. | |
1975 | * | |
1976 | * Returns: | |
1977 | * - 1: if hardware regs contain valid error info | |
1978 | * - 0: if no valid error is indicated | |
1979 | */ | |
1980 | static int amd64_get_error_info_regs(struct mem_ctl_info *mci, | |
ef44cc4c | 1981 | struct err_regs *regs) |
d27bf6fa DT |
1982 | { |
1983 | struct amd64_pvt *pvt; | |
1984 | struct pci_dev *misc_f3_ctl; | |
d27bf6fa DT |
1985 | |
1986 | pvt = mci->pvt_info; | |
1987 | misc_f3_ctl = pvt->misc_f3_ctl; | |
1988 | ||
6ba5dcdc BP |
1989 | if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSH, ®s->nbsh)) |
1990 | return 0; | |
d27bf6fa DT |
1991 | |
1992 | if (!(regs->nbsh & K8_NBSH_VALID_BIT)) | |
1993 | return 0; | |
1994 | ||
1995 | /* valid error, read remaining error information registers */ | |
6ba5dcdc BP |
1996 | if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSL, ®s->nbsl) || |
1997 | amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAL, ®s->nbeal) || | |
1998 | amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAH, ®s->nbeah) || | |
1999 | amd64_read_pci_cfg(misc_f3_ctl, K8_NBCFG, ®s->nbcfg)) | |
2000 | return 0; | |
d27bf6fa DT |
2001 | |
2002 | return 1; | |
d27bf6fa DT |
2003 | } |
2004 | ||
2005 | /* | |
2006 | * This function is called to retrieve the error data from hardware and store it | |
2007 | * in the info structure. | |
2008 | * | |
2009 | * Returns: | |
2010 | * - 1: if a valid error is found | |
2011 | * - 0: if no error is found | |
2012 | */ | |
2013 | static int amd64_get_error_info(struct mem_ctl_info *mci, | |
ef44cc4c | 2014 | struct err_regs *info) |
d27bf6fa DT |
2015 | { |
2016 | struct amd64_pvt *pvt; | |
ef44cc4c | 2017 | struct err_regs regs; |
d27bf6fa DT |
2018 | |
2019 | pvt = mci->pvt_info; | |
2020 | ||
2021 | if (!amd64_get_error_info_regs(mci, info)) | |
2022 | return 0; | |
2023 | ||
2024 | /* | |
2025 | * Here's the problem with the K8's EDAC reporting: There are four | |
2026 | * registers which report pieces of error information. They are shared | |
2027 | * between CEs and UEs. Furthermore, contrary to what is stated in the | |
2028 | * BKDG, the overflow bit is never used! Every error always updates the | |
2029 | * reporting registers. | |
2030 | * | |
2031 | * Can you see the race condition? All four error reporting registers | |
2032 | * must be read before a new error updates them! There is no way to read | |
2033 | * all four registers atomically. The best that can be done is to detect | |
2034 | * that a race has occurred and then report the error without any kind of | |
2035 | * precision. | |
2036 | * | |
2037 | * What is still positive is that errors are still reported and thus | |
2038 | * problems can still be detected - just not localized because the | |
2039 | * syndrome and address are spread out across registers. | |
2040 | * | |
2041 | * Grrrrr!!!!! Here's hoping that AMD fixes this in some future K8 rev. | |
2042 | * UEs and CEs should have separate register sets with proper overflow | |
2043 | * bits that are used! At the very least the problem can be fixed by | |
2044 | * honoring the ErrValid bit in 'nbsh' and not updating registers - just | |
2045 | * set the overflow bit - unless the current error is CE and the new | |
2046 | * error is UE which would be the only situation for overwriting the | |
2047 | * current values. | |
2048 | */ | |
2049 | ||
2050 | regs = *info; | |
2051 | ||
2052 | /* Use info from the second read - most current */ | |
2053 | if (unlikely(!amd64_get_error_info_regs(mci, info))) | |
2054 | return 0; | |
2055 | ||
2056 | /* clear the error bits in hardware */ | |
2057 | pci_write_bits32(pvt->misc_f3_ctl, K8_NBSH, 0, K8_NBSH_VALID_BIT); | |
2058 | ||
2059 | /* Check for the possible race condition */ | |
2060 | if ((regs.nbsh != info->nbsh) || | |
2061 | (regs.nbsl != info->nbsl) || | |
2062 | (regs.nbeah != info->nbeah) || | |
2063 | (regs.nbeal != info->nbeal)) { | |
2064 | amd64_mc_printk(mci, KERN_WARNING, | |
2065 | "hardware STATUS read access race condition " | |
2066 | "detected!\n"); | |
2067 | return 0; | |
2068 | } | |
2069 | return 1; | |
2070 | } | |
2071 | ||
d27bf6fa DT |
2072 | /* |
2073 | * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR | |
2074 | * ADDRESS and process. | |
2075 | */ | |
2076 | static void amd64_handle_ce(struct mem_ctl_info *mci, | |
ef44cc4c | 2077 | struct err_regs *info) |
d27bf6fa DT |
2078 | { |
2079 | struct amd64_pvt *pvt = mci->pvt_info; | |
44e9e2ee | 2080 | u64 sys_addr; |
d27bf6fa DT |
2081 | |
2082 | /* Ensure that the Error Address is VALID */ | |
2083 | if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) { | |
2084 | amd64_mc_printk(mci, KERN_ERR, | |
2085 | "HW has no ERROR_ADDRESS available\n"); | |
2086 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); | |
2087 | return; | |
2088 | } | |
2089 | ||
1f6bcee7 | 2090 | sys_addr = pvt->ops->get_error_address(mci, info); |
d27bf6fa DT |
2091 | |
2092 | amd64_mc_printk(mci, KERN_ERR, | |
44e9e2ee | 2093 | "CE ERROR_ADDRESS= 0x%llx\n", sys_addr); |
d27bf6fa | 2094 | |
44e9e2ee | 2095 | pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr); |
d27bf6fa DT |
2096 | } |
2097 | ||
2098 | /* Handle any Un-correctable Errors (UEs) */ | |
2099 | static void amd64_handle_ue(struct mem_ctl_info *mci, | |
ef44cc4c | 2100 | struct err_regs *info) |
d27bf6fa | 2101 | { |
1f6bcee7 BP |
2102 | struct amd64_pvt *pvt = mci->pvt_info; |
2103 | struct mem_ctl_info *log_mci, *src_mci = NULL; | |
d27bf6fa | 2104 | int csrow; |
44e9e2ee | 2105 | u64 sys_addr; |
d27bf6fa | 2106 | u32 page, offset; |
d27bf6fa DT |
2107 | |
2108 | log_mci = mci; | |
2109 | ||
2110 | if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) { | |
2111 | amd64_mc_printk(mci, KERN_CRIT, | |
2112 | "HW has no ERROR_ADDRESS available\n"); | |
2113 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); | |
2114 | return; | |
2115 | } | |
2116 | ||
1f6bcee7 | 2117 | sys_addr = pvt->ops->get_error_address(mci, info); |
d27bf6fa DT |
2118 | |
2119 | /* | |
2120 | * Find out which node the error address belongs to. This may be | |
2121 | * different from the node that detected the error. | |
2122 | */ | |
44e9e2ee | 2123 | src_mci = find_mc_by_sys_addr(mci, sys_addr); |
d27bf6fa DT |
2124 | if (!src_mci) { |
2125 | amd64_mc_printk(mci, KERN_CRIT, | |
2126 | "ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n", | |
44e9e2ee | 2127 | (unsigned long)sys_addr); |
d27bf6fa DT |
2128 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); |
2129 | return; | |
2130 | } | |
2131 | ||
2132 | log_mci = src_mci; | |
2133 | ||
44e9e2ee | 2134 | csrow = sys_addr_to_csrow(log_mci, sys_addr); |
d27bf6fa DT |
2135 | if (csrow < 0) { |
2136 | amd64_mc_printk(mci, KERN_CRIT, | |
2137 | "ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n", | |
44e9e2ee | 2138 | (unsigned long)sys_addr); |
d27bf6fa DT |
2139 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); |
2140 | } else { | |
44e9e2ee | 2141 | error_address_to_page_and_offset(sys_addr, &page, &offset); |
d27bf6fa DT |
2142 | edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR); |
2143 | } | |
2144 | } | |
2145 | ||
549d042d | 2146 | static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, |
b69b29de | 2147 | struct err_regs *info) |
d27bf6fa | 2148 | { |
b70ef010 BP |
2149 | u32 ec = ERROR_CODE(info->nbsl); |
2150 | u32 xec = EXT_ERROR_CODE(info->nbsl); | |
17adea01 | 2151 | int ecc_type = (info->nbsh >> 13) & 0x3; |
d27bf6fa | 2152 | |
b70ef010 BP |
2153 | /* Bail out early if this was an 'observed' error */ | |
2154 | if (PP(ec) == K8_NBSL_PP_OBS) | |
2155 | return; | |
d27bf6fa | 2156 | |
ecaf5606 BP |
2157 | /* Do only ECC errors */ |
2158 | if (xec && xec != F10_NBSL_EXT_ERR_ECC) | |
d27bf6fa | 2159 | return; |
d27bf6fa | 2160 | |
ecaf5606 | 2161 | if (ecc_type == 2) |
d27bf6fa | 2162 | amd64_handle_ce(mci, info); |
ecaf5606 | 2163 | else if (ecc_type == 1) |
d27bf6fa DT |
2164 | amd64_handle_ue(mci, info); |
2165 | ||
2166 | /* | |
2167 | * If main error is CE then overflow must be CE. If main error is UE | |
2168 | * then overflow is unknown. We'll call the overflow a CE - if | |
2169 | * panic_on_ue is set then we're already panic'ed and won't arrive | |
2170 | * here. Otherwise, apparently someone doesn't think that UEs are | |
2171 | * catastrophic. | |
2172 | */ | |
2173 | if (info->nbsh & K8_NBSH_OVERFLOW) | |
ecaf5606 | 2174 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR "Error Overflow"); |
d27bf6fa DT |
2175 | } |
2176 | ||
b69b29de | 2177 | void amd64_decode_bus_error(int node_id, struct err_regs *regs) |
d27bf6fa | 2178 | { |
549d042d | 2179 | struct mem_ctl_info *mci = mci_lookup[node_id]; |
d27bf6fa | 2180 | |
b69b29de | 2181 | __amd64_decode_bus_error(mci, regs); |
d27bf6fa | 2182 | |
d27bf6fa DT |
2183 | /* |
2184 | * Check the UE bit of the NB status high register; if set, generate some | |
2185 | * logs. If NOT a GART error, then process the event as a NO-INFO event. | |
2186 | * If it was a GART error, skip that process. | |
549d042d BP |
2187 | * |
2188 | * FIXME: this should go somewhere else, if at all. | |
d27bf6fa | 2189 | */ |
5110dbde BP |
2190 | if (regs->nbsh & K8_NBSH_UC_ERR && !report_gart_errors) |
2191 | edac_mc_handle_ue_no_info(mci, "UE bit is set"); | |
549d042d | 2192 | |
d27bf6fa | 2193 | } |
d27bf6fa | 2194 | |
0ec449ee DT |
2195 | /* |
2196 | * The main polling 'check' function, called FROM the edac core to perform the | |
2197 | * error checking and, if an error is encountered, the error processing. | |
2198 | */ | |
2199 | static void amd64_check(struct mem_ctl_info *mci) | |
2200 | { | |
ef44cc4c | 2201 | struct err_regs regs; |
0ec449ee | 2202 | |
549d042d BP |
2203 | if (amd64_get_error_info(mci, ®s)) { |
2204 | struct amd64_pvt *pvt = mci->pvt_info; | |
2205 | amd_decode_nb_mce(pvt->mc_node_id, ®s, 1); | |
2206 | } | |
0ec449ee DT |
2207 | } |
2208 | ||
2209 | /* | |
2210 | * Input: | |
2211 | * 1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer | |
2212 | * 2) AMD Family index value | |
2213 | * | |
2214 | * Output: | |
2215 | * Upon return of 0, the following are filled in: | |
2216 | * | |
2217 | * struct pvt->addr_f1_ctl | |
2218 | * struct pvt->misc_f3_ctl | |
2219 | * | |
2220 | * Filled in with the related device functions of 'dram_f2_ctl'. | |
2221 | * These devices are "reserved" via the pci_get_device() | |
2222 | * | |
2223 | * Upon return of 1 (error status): | |
2224 | * | |
2225 | * Nothing reserved | |
2226 | */ | |
2227 | static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx) | |
2228 | { | |
2229 | const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx]; | |
2230 | ||
2231 | /* Reserve the ADDRESS MAP Device */ | |
2232 | pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor, | |
2233 | amd64_dev->addr_f1_ctl, | |
2234 | pvt->dram_f2_ctl); | |
2235 | ||
2236 | if (!pvt->addr_f1_ctl) { | |
2237 | amd64_printk(KERN_ERR, "error address map device not found: " | |
2238 | "vendor %x device 0x%x (broken BIOS?)\n", | |
2239 | PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl); | |
2240 | return 1; | |
2241 | } | |
2242 | ||
2243 | /* Reserve the MISC Device */ | |
2244 | pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor, | |
2245 | amd64_dev->misc_f3_ctl, | |
2246 | pvt->dram_f2_ctl); | |
2247 | ||
2248 | if (!pvt->misc_f3_ctl) { | |
2249 | pci_dev_put(pvt->addr_f1_ctl); | |
2250 | pvt->addr_f1_ctl = NULL; | |
2251 | ||
2252 | amd64_printk(KERN_ERR, "error miscellaneous device not found: " | |
2253 | "vendor %x device 0x%x (broken BIOS?)\n", | |
2254 | PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl); | |
2255 | return 1; | |
2256 | } | |
2257 | ||
2258 | debugf1(" Addr Map device PCI Bus ID:\t%s\n", | |
2259 | pci_name(pvt->addr_f1_ctl)); | |
2260 | debugf1(" DRAM MEM-CTL PCI Bus ID:\t%s\n", | |
2261 | pci_name(pvt->dram_f2_ctl)); | |
2262 | debugf1(" Misc device PCI Bus ID:\t%s\n", | |
2263 | pci_name(pvt->misc_f3_ctl)); | |
2264 | ||
2265 | return 0; | |
2266 | } | |
2267 | ||
2268 | static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt) | |
2269 | { | |
2270 | pci_dev_put(pvt->addr_f1_ctl); | |
2271 | pci_dev_put(pvt->misc_f3_ctl); | |
2272 | } | |
2273 | ||
2274 | /* | |
2275 | * Retrieve the hardware registers of the memory controller (this includes the | |
2276 | * 'Address Map' and 'Misc' device regs) | |
2277 | */ | |
2278 | static void amd64_read_mc_registers(struct amd64_pvt *pvt) | |
2279 | { | |
2280 | u64 msr_val; | |
6ba5dcdc | 2281 | int dram; |
0ec449ee DT |
2282 | |
2283 | /* | |
2284 | * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since | |
2285 | * those are Read-As-Zero | |
2286 | */ | |
e97f8bb8 BP |
2287 | rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); |
2288 | debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem); | |
0ec449ee DT |
2289 | |
2290 | /* check first whether TOP_MEM2 is enabled */ | |
2291 | rdmsrl(MSR_K8_SYSCFG, msr_val); | |
2292 | if (msr_val & (1U << 21)) { | |
e97f8bb8 BP |
2293 | rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); |
2294 | debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2); | |
0ec449ee DT |
2295 | } else |
2296 | debugf0(" TOP_MEM2 disabled.\n"); | |
2297 | ||
2298 | amd64_cpu_display_info(pvt); | |
2299 | ||
6ba5dcdc | 2300 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap); |
0ec449ee DT |
2301 | |
2302 | if (pvt->ops->read_dram_ctl_register) | |
2303 | pvt->ops->read_dram_ctl_register(pvt); | |
2304 | ||
2305 | for (dram = 0; dram < DRAM_REG_COUNT; dram++) { | |
2306 | /* | |
2307 | * Call CPU specific READ function to get the DRAM Base and | |
2308 | * Limit values from the DCT. | |
2309 | */ | |
2310 | pvt->ops->read_dram_base_limit(pvt, dram); | |
2311 | ||
2312 | /* | |
2313 | * Only print out debug info on rows with both R and W Enabled. | |
2314 | * In normal processing, the compiler should optimize this whole 'if' | |
2315 | * debug output block away. | |
2316 | */ | |
2317 | if (pvt->dram_rw_en[dram] != 0) { | |
e97f8bb8 BP |
2318 | debugf1(" DRAM-BASE[%d]: 0x%016llx " |
2319 | "DRAM-LIMIT: 0x%016llx\n", | |
0ec449ee | 2320 | dram, |
e97f8bb8 BP |
2321 | pvt->dram_base[dram], |
2322 | pvt->dram_limit[dram]); | |
2323 | ||
0ec449ee DT |
2324 | debugf1(" IntlvEn=%s %s %s " |
2325 | "IntlvSel=%d DstNode=%d\n", | |
2326 | pvt->dram_IntlvEn[dram] ? | |
2327 | "Enabled" : "Disabled", | |
2328 | (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W", | |
2329 | (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R", | |
2330 | pvt->dram_IntlvSel[dram], | |
2331 | pvt->dram_DstNode[dram]); | |
2332 | } | |
2333 | } | |
2334 | ||
2335 | amd64_read_dct_base_mask(pvt); | |
2336 | ||
6ba5dcdc | 2337 | amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar); |
0ec449ee DT |
2338 | amd64_read_dbam_reg(pvt); |
2339 | ||
6ba5dcdc BP |
2340 | amd64_read_pci_cfg(pvt->misc_f3_ctl, |
2341 | F10_ONLINE_SPARE, &pvt->online_spare); | |
0ec449ee | 2342 | |
6ba5dcdc BP |
2343 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0); |
2344 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0); | |
0ec449ee DT |
2345 | |
2346 | if (!dct_ganging_enabled(pvt)) { | |
6ba5dcdc BP |
2347 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1); |
2348 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1); | |
0ec449ee | 2349 | } |
0ec449ee | 2350 | amd64_dump_misc_regs(pvt); |
0ec449ee DT |
2351 | } |
2352 | ||
2353 | /* | |
2354 | * NOTE: CPU Revision Dependent code | |
2355 | * | |
2356 | * Input: | |
9d858bb1 | 2357 | * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1) |
0ec449ee DT |
2358 | * k8 private pointer to --> |
2359 | * DRAM Bank Address mapping register | |
2360 | * node_id | |
2361 | * DCL register where dual_channel_active is | |
2362 | * | |
2363 | * The DBAM register consists of 4 sets of 4 bits each definitions: | |
2364 | * | |
2365 | * Bits: CSROWs | |
2366 | * 0-3 CSROWs 0 and 1 | |
2367 | * 4-7 CSROWs 2 and 3 | |
2368 | * 8-11 CSROWs 4 and 5 | |
2369 | * 12-15 CSROWs 6 and 7 | |
2370 | * | |
2371 | * Values range from: 0 to 15 | |
2372 | * The meaning of the values depends on CPU revision and dual-channel state, | |
2373 | * see the relevant BKDG for more info. | |
2374 | * | |
2375 | * The memory controller provides for a total of only 8 CSROWs in its current | |
2376 | * architecture. Each "pair" of CSROWs normally represents just one DIMM in | |
2377 | * single channel or two (2) DIMMs in dual channel mode. | |
2378 | * | |
2379 | * The following code logic collapses the various tables for CSROW based on CPU | |
2380 | * revision. | |
2381 | * | |
2382 | * Returns: | |
2383 | * The number of PAGE_SIZE pages on the specified CSROW number it | |
2384 | * encompasses | |
2385 | * | |
2386 | */ | |
2387 | static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) | |
2388 | { | |
1433eb99 | 2389 | u32 cs_mode, nr_pages; |
0ec449ee DT |
2390 | |
2391 | /* | |
2392 | * The math on this doesn't look right on the surface because x/2*4 can | |
2393 | * be simplified to x*2, but this expression makes use of the fact that it | |
2394 | * is integer math, where 1/2 == 0. The intermediate value becomes the | |
2395 | * number of bits to shift the DBAM register to extract the proper CSROW | |
2396 | * field. | |
2397 | */ | |
1433eb99 | 2398 | cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF; |
0ec449ee | 2399 | |
1433eb99 | 2400 | nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT); |
0ec449ee DT |
2401 | |
2402 | /* | |
2403 | * If dual channel then double the memory size of single channel. | |
2404 | * Channel count is 1 or 2 | |
2405 | */ | |
2406 | nr_pages <<= (pvt->channel_count - 1); | |
2407 | ||
1433eb99 | 2408 | debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode); |
0ec449ee DT |
2409 | debugf0(" nr_pages= %u channel-count = %d\n", |
2410 | nr_pages, pvt->channel_count); | |
2411 | ||
2412 | return nr_pages; | |
2413 | } | |
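/*
 * Worked example for the shift math above (illustrative values,
 * PAGE_SHIFT == 12 assumed): csrow 5 lives in DBAM field 2, so with
 * pvt->dbam0 == 0x500:
 *   cs_mode  = (0x500 >> ((5 / 2) * 4)) & 0xF = 5
 * and, if pvt->ops->dbam_to_cs() maps cs_mode 5 to 1024 (MB),
 *   nr_pages = 1024 << (20 - 12) = 262144 4K pages per channel.
 */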
2414 | ||
2415 | /* | |
2416 | * Initialize the array of csrow attribute instances, based on the values | |
2417 | * from pci config hardware registers. | |
2418 | */ | |
2419 | static int amd64_init_csrows(struct mem_ctl_info *mci) | |
2420 | { | |
2421 | struct csrow_info *csrow; | |
2422 | struct amd64_pvt *pvt; | |
2423 | u64 input_addr_min, input_addr_max, sys_addr; | |
6ba5dcdc | 2424 | int i, empty = 1; |
0ec449ee DT |
2425 | |
2426 | pvt = mci->pvt_info; | |
2427 | ||
6ba5dcdc | 2428 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg); |
0ec449ee DT |
2429 | |
2430 | debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg, | |
2431 | (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", | |
2432 | (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled" | |
2433 | ); | |
2434 | ||
9d858bb1 | 2435 | for (i = 0; i < pvt->cs_count; i++) { |
0ec449ee DT |
2436 | csrow = &mci->csrows[i]; |
2437 | ||
2438 | if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { | |
2439 | debugf1("----CSROW %d EMPTY for node %d\n", i, | |
2440 | pvt->mc_node_id); | |
2441 | continue; | |
2442 | } | |
2443 | ||
2444 | debugf1("----CSROW %d VALID for MC node %d\n", | |
2445 | i, pvt->mc_node_id); | |
2446 | ||
2447 | empty = 0; | |
2448 | csrow->nr_pages = amd64_csrow_nr_pages(i, pvt); | |
2449 | find_csrow_limits(mci, i, &input_addr_min, &input_addr_max); | |
2450 | sys_addr = input_addr_to_sys_addr(mci, input_addr_min); | |
2451 | csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT); | |
2452 | sys_addr = input_addr_to_sys_addr(mci, input_addr_max); | |
2453 | csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT); | |
2454 | csrow->page_mask = ~mask_from_dct_mask(pvt, i); | |
2455 | /* 8 bytes of resolution */ | |
2456 | ||
2457 | csrow->mtype = amd64_determine_memory_type(pvt); | |
2458 | ||
2459 | debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i); | |
2460 | debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n", | |
2461 | (unsigned long)input_addr_min, | |
2462 | (unsigned long)input_addr_max); | |
2463 | debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n", | |
2464 | (unsigned long)sys_addr, csrow->page_mask); | |
2465 | debugf1(" nr_pages: %u first_page: 0x%lx " | |
2466 | "last_page: 0x%lx\n", | |
2467 | (unsigned)csrow->nr_pages, | |
2468 | csrow->first_page, csrow->last_page); | |
2469 | ||
2470 | /* | |
2471 | * determine whether CHIPKILL or JUST ECC or NO ECC is operating | |
2472 | */ | |
2473 | if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) | |
2474 | csrow->edac_mode = | |
2475 | (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? | |
2476 | EDAC_S4ECD4ED : EDAC_SECDED; | |
2477 | else | |
2478 | csrow->edac_mode = EDAC_NONE; | |
2479 | } | |
2480 | ||
2481 | return empty; | |
2482 | } | |
d27bf6fa | 2483 | |
f6d6ae96 BP |
2484 | /* get all cores on this DCT */ |
2485 | static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid) | |
2486 | { | |
2487 | int cpu; | |
2488 | ||
2489 | for_each_online_cpu(cpu) | |
2490 | if (amd_get_nb_id(cpu) == nid) | |
2491 | cpumask_set_cpu(cpu, mask); | |
2492 | } | |
2493 | ||
2494 | /* check MCG_CTL on all the cpus on this node */ | |
2495 | static bool amd64_nb_mce_bank_enabled_on_node(int nid) | |
2496 | { | |
2497 | cpumask_var_t mask; | |
2498 | struct msr *msrs; | |
2499 | int cpu, nbe, idx = 0; | |
2500 | bool ret = false; | |
2501 | ||
2502 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { | |
2503 | amd64_printk(KERN_WARNING, "%s: error allocating mask\n", | |
2504 | __func__); | |
2505 | return false; | |
2506 | } | |
2507 | ||
2508 | get_cpus_on_this_dct_cpumask(mask, nid); | |
2509 | ||
2510 | msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL); | |
2511 | if (!msrs) { | |
2512 | amd64_printk(KERN_WARNING, "%s: error allocating msrs\n", | |
2513 | __func__); | |
2514 | free_cpumask_var(mask); | |
2515 | return false; | |
2516 | } | |
2517 | ||
2518 | rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs); | |
2519 | ||
2520 | for_each_cpu(cpu, mask) { | |
2521 | nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE; | |
2522 | ||
2523 | debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", | |
2524 | cpu, msrs[idx].q, | |
2525 | (nbe ? "enabled" : "disabled")); | |
2526 | ||
2527 | if (!nbe) | |
2528 | goto out; | |
2529 | ||
2530 | idx++; | |
2531 | } | |
2532 | ret = true; | |
2533 | ||
2534 | out: | |
2535 | kfree(msrs); | |
2536 | free_cpumask_var(mask); | |
2537 | return ret; | |
2538 | } | |
2539 | ||
2540 | static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) | |
2541 | { | |
2542 | cpumask_var_t cmask; | |
2543 | struct msr *msrs = NULL; | |
2544 | int cpu, idx = 0; | |
2545 | ||
2546 | if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) { | |
2547 | amd64_printk(KERN_WARNING, "%s: error allocating mask\n", | |
2548 | __func__); | |
2549 | return -ENOMEM; | |
2550 | } | |
2551 | ||
2552 | get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id); | |
2553 | ||
2554 | msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL); | |
2555 | if (!msrs) { | |
2556 | amd64_printk(KERN_WARNING, "%s: error allocating msrs\n", | |
2557 | __func__); | |
2558 | return -ENOMEM; | |
2559 | } | |
2560 | ||
2561 | rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); | |
2562 | ||
2563 | for_each_cpu(cpu, cmask) { | |
2564 | ||
2565 | if (on) { | |
2566 | if (msrs[idx].l & K8_MSR_MCGCTL_NBE) | |
2567 | pvt->flags.ecc_report = 1; | |
2568 | ||
2569 | msrs[idx].l |= K8_MSR_MCGCTL_NBE; | |
2570 | } else { | |
2571 | /* | |
2572 | * Turn off ECC reporting only when it was off before | |
2573 | */ | |
2574 | if (!pvt->flags.ecc_report) | |
2575 | msrs[idx].l &= ~K8_MSR_MCGCTL_NBE; | |
2576 | } | |
2577 | idx++; | |
2578 | } | |
2579 | wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); | |
2580 | ||
2581 | kfree(msrs); | |
2582 | free_cpumask_var(cmask); | |
2583 | ||
2584 | return 0; | |
2585 | } | |
2586 | ||
f9431992 DT |
2587 | /* |
2588 | * Only if 'ecc_enable_override' is set AND the BIOS had ECC disabled do we | |
2589 | * enable it here. | |
2590 | */ | |
2591 | static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) | |
2592 | { | |
2593 | struct amd64_pvt *pvt = mci->pvt_info; | |
f6d6ae96 | 2594 | u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; |
f9431992 DT |
2595 | |
2596 | if (!ecc_enable_override) | |
2597 | return; | |
2598 | ||
f9431992 DT |
2599 | amd64_printk(KERN_WARNING, |
2600 | "'ecc_enable_override' parameter is active, " | |
2601 | "Enabling AMD ECC hardware now: CAUTION\n"); | |
2602 | ||
6ba5dcdc | 2603 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value); |
f9431992 DT |
2604 | |
2605 | /* turn on UECCn and CECCEn bits */ | |
2606 | pvt->old_nbctl = value & mask; | |
2607 | pvt->nbctl_mcgctl_saved = 1; | |
2608 | ||
2609 | value |= mask; | |
2610 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); | |
2611 | ||
f6d6ae96 BP |
2612 | if (amd64_toggle_ecc_err_reporting(pvt, ON)) |
2613 | amd64_printk(KERN_WARNING, "Error enabling ECC reporting over " | |
2614 | "MCGCTL!\n"); | |
f9431992 | 2615 | |
6ba5dcdc | 2616 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); |
f9431992 DT |
2617 | |
2618 | debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, | |
2619 | (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", | |
2620 | (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"); | |
2621 | ||
2622 | if (!(value & K8_NBCFG_ECC_ENABLE)) { | |
2623 | amd64_printk(KERN_WARNING, | |
2624 | "This node reports that DRAM ECC is " | |
2625 | "currently Disabled; ENABLING now\n"); | |
2626 | ||
2627 | /* Attempt to turn on DRAM ECC Enable */ | |
2628 | value |= K8_NBCFG_ECC_ENABLE; | |
2629 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value); | |
2630 | ||
6ba5dcdc | 2631 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); |
f9431992 DT |
2632 | |
2633 | if (!(value & K8_NBCFG_ECC_ENABLE)) { | |
2634 | amd64_printk(KERN_WARNING, | |
2635 | "Hardware rejects Enabling DRAM ECC checking\n" | |
2636 | "Check memory DIMM configuration\n"); | |
2637 | } else { | |
2638 | amd64_printk(KERN_DEBUG, | |
2639 | "Hardware accepted DRAM ECC Enable\n"); | |
2640 | } | |
2641 | } | |
2642 | debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, | |
2643 | (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", | |
2644 | (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"); | |
2645 | ||
2646 | pvt->ctl_error_info.nbcfg = value; | |
2647 | } | |
2648 | ||
2649 | static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) | |
2650 | { | |
f6d6ae96 | 2651 | u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; |
f9431992 DT |
2652 | |
2653 | if (!pvt->nbctl_mcgctl_saved) | |
2654 | return; | |
2655 | ||
6ba5dcdc | 2656 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value); |
f9431992 DT |
2657 | value &= ~mask; |
2658 | value |= pvt->old_nbctl; | |
2659 | ||
2660 | /* restore the NB Enable MCGCTL bit */ | |
2661 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); | |
2662 | ||
f6d6ae96 BP |
2663 | if (amd64_toggle_ecc_err_reporting(pvt, OFF)) |
2664 | amd64_printk(KERN_WARNING, "Error restoring ECC reporting over " | |
2665 | "MCGCTL!\n"); | |
f9431992 DT |
2666 | } |
2667 | ||
2668 | /* | |
2669 | * EDAC requires that the BIOS have ECC enabled before taking over the | |
2670 | * processing of ECC errors. This is because the BIOS can properly initialize | |
2671 | * the memory system completely. A command line option can force-enable | |
2672 | * hardware ECC later in amd64_enable_ecc_error_reporting(). | |
2673 | */ | |
be3468e8 BP |
2674 | static const char *ecc_warning = |
2675 | "WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n" | |
2676 | " Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n" | |
2677 | " Also, use of the override can cause unknown side effects.\n"; | |
2678 | ||
f9431992 DT |
2679 | static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) |
2680 | { | |
2681 | u32 value; | |
06724535 BP |
2682 | u8 ecc_enabled = 0; |
2683 | bool nb_mce_en = false; | |
f9431992 | 2684 | |
6ba5dcdc | 2685 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); |
f9431992 DT |
2686 | |
2687 | ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE); | |
be3468e8 BP |
2688 | if (!ecc_enabled) |
2689 | amd64_printk(KERN_WARNING, "This node reports that Memory ECC " | |
2690 | "is currently disabled, set F3x%x[22] (%s).\n", | |
2691 | K8_NBCFG, pci_name(pvt->misc_f3_ctl)); | |
2692 | else | |
2693 | amd64_printk(KERN_INFO, "ECC is enabled by BIOS.\n"); | |
f9431992 | 2694 | |
06724535 BP |
2695 | nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id); |
2696 | if (!nb_mce_en) | |
be3468e8 BP |
2697 | amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR " |
2698 | "0x%08x[4] on node %d to enable.\n", | |
2699 | MSR_IA32_MCG_CTL, pvt->mc_node_id); | |
f9431992 | 2700 | |
06724535 | 2701 | if (!ecc_enabled || !nb_mce_en) { |
f9431992 | 2702 | if (!ecc_enable_override) { |
be3468e8 BP |
2703 | amd64_printk(KERN_WARNING, "%s", ecc_warning); |
2704 | return -ENODEV; | |
2705 | } | |
2706 | } else | |
f9431992 DT |
2707 | /* CLEAR the override, since BIOS controlled it */ |
2708 | ecc_enable_override = 0; | |
f9431992 | 2709 | |
be3468e8 | 2710 | return 0; |
f9431992 DT |
2711 | } |
2712 | ||
7d6034d3 DT |
2713 | struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) + |
2714 | ARRAY_SIZE(amd64_inj_attrs) + | |
2715 | 1]; | |
2716 | ||
2717 | struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } }; | |
2718 | ||
2719 | static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci) | |
2720 | { | |
2721 | unsigned int i = 0, j = 0; | |
2722 | ||
2723 | for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++) | |
2724 | sysfs_attrs[i] = amd64_dbg_attrs[i]; | |
2725 | ||
2726 | for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++) | |
2727 | sysfs_attrs[i] = amd64_inj_attrs[j]; | |
2728 | ||
2729 | sysfs_attrs[i] = terminator; | |
2730 | ||
2731 | mci->mc_driver_sysfs_attributes = sysfs_attrs; | |
2732 | } | |
2733 | ||
2734 | static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci) | |
2735 | { | |
2736 | struct amd64_pvt *pvt = mci->pvt_info; | |
2737 | ||
2738 | mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; | |
2739 | mci->edac_ctl_cap = EDAC_FLAG_NONE; | |
7d6034d3 DT |
2740 | |
2741 | if (pvt->nbcap & K8_NBCAP_SECDED) | |
2742 | mci->edac_ctl_cap |= EDAC_FLAG_SECDED; | |
2743 | ||
2744 | if (pvt->nbcap & K8_NBCAP_CHIPKILL) | |
2745 | mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; | |
2746 | ||
2747 | mci->edac_cap = amd64_determine_edac_cap(pvt); | |
2748 | mci->mod_name = EDAC_MOD_STR; | |
2749 | mci->mod_ver = EDAC_AMD64_VERSION; | |
2750 | mci->ctl_name = get_amd_family_name(pvt->mc_type_index); | |
2751 | mci->dev_name = pci_name(pvt->dram_f2_ctl); | |
2752 | mci->ctl_page_to_phys = NULL; | |
2753 | ||
2754 | /* IMPORTANT: Set the polling 'check' function in this module */ | |
2755 | mci->edac_check = amd64_check; | |
2756 | ||
2757 | /* memory scrubber interface */ | |
2758 | mci->set_sdram_scrub_rate = amd64_set_scrub_rate; | |
2759 | mci->get_sdram_scrub_rate = amd64_get_scrub_rate; | |
2760 | } | |
2761 | ||
2762 | /* | |
2763 | * Init stuff for this DRAM Controller device. | |
2764 | * | |
2765 | * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration | |
2766 | * Space feature MUST be enabled on ALL Processors prior to actually reading | |
2767 | * from the ECS registers, since the loading of the module can occur on any | |
2768 | * 'core' and cores don't 'see' the other processors' ECS data when ECS is | |
2769 | * NOT enabled on them. Our solution is to first enable ECS access in this | |
2770 | * routine on all processors, gather some data in an amd64_pvt structure and | |
2771 | * later come back in a finish-setup function to perform the final | |
2772 | * initialization. See also amd64_init_2nd_stage() for that. | |
2773 | */ | |
2774 | static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl, | |
2775 | int mc_type_index) | |
2776 | { | |
2777 | struct amd64_pvt *pvt = NULL; | |
2778 | int err = 0, ret; | |
2779 | ||
2780 | ret = -ENOMEM; | |
2781 | pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); | |
2782 | if (!pvt) | |
2783 | goto err_exit; | |
2784 | ||
37da0450 | 2785 | pvt->mc_node_id = get_node_id(dram_f2_ctl); |
7d6034d3 DT |
2786 | |
2787 | pvt->dram_f2_ctl = dram_f2_ctl; | |
2788 | pvt->ext_model = boot_cpu_data.x86_model >> 4; | |
2789 | pvt->mc_type_index = mc_type_index; | |
2790 | pvt->ops = family_ops(mc_type_index); | |
7d6034d3 DT |
2791 | |
2792 | /* | |
2793 | * We have the dram_f2_ctl device as an argument, now go reserve its | |
2794 | * sibling devices from the PCI system. | |
2795 | */ | |
2796 | ret = -ENODEV; | |
2797 | err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index); | |
2798 | if (err) | |
2799 | goto err_free; | |
2800 | ||
2801 | ret = -EINVAL; | |
2802 | err = amd64_check_ecc_enabled(pvt); | |
2803 | if (err) | |
2804 | goto err_put; | |
2805 | ||
2806 | /* | |
2807 | * Key operation here: setup of HW prior to performing ops on it. Some | |
2808 | * setup is required to access ECS data. After this is performed, the | |
2809 | * 'teardown' function must be called upon error and normal exit paths. | |
2810 | */ | |
2811 | if (boot_cpu_data.x86 >= 0x10) | |
2812 | amd64_setup(pvt); | |
2813 | ||
2814 | /* | |
2815 | * Save the pointer to the private data for use in 2nd initialization | |
2816 | * stage | |
2817 | */ | |
2818 | pvt_lookup[pvt->mc_node_id] = pvt; | |
2819 | ||
2820 | return 0; | |
2821 | ||
2822 | err_put: | |
2823 | amd64_free_mc_sibling_devices(pvt); | |
2824 | ||
2825 | err_free: | |
2826 | kfree(pvt); | |
2827 | ||
2828 | err_exit: | |
2829 | return ret; | |
2830 | } | |
2831 | ||
2832 | /* | |
2833 | * This is the finishing stage of the init code. Needs to be performed after all | |
2834 | * MCs' hardware has been prepped for accessing extended config space. | |
2835 | */ | |
2836 | static int amd64_init_2nd_stage(struct amd64_pvt *pvt) | |
2837 | { | |
2838 | int node_id = pvt->mc_node_id; | |
2839 | struct mem_ctl_info *mci; | |
18ba54ac | 2840 | int ret = -ENODEV; |
7d6034d3 DT |
2841 | |
2842 | amd64_read_mc_registers(pvt); | |
2843 | ||
7d6034d3 DT |
2844 | /* |
2845 | * We need to determine how many memory channels there are. Then use | |
2846 | * that information for calculating the size of the dynamic instance | |
2847 | * tables in the 'mci' structure | |
2848 | */ | |
2849 | pvt->channel_count = pvt->ops->early_channel_count(pvt); | |
2850 | if (pvt->channel_count < 0) | |
2851 | goto err_exit; | |
2852 | ||
2853 | ret = -ENOMEM; | |
9d858bb1 | 2854 | mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id); |
7d6034d3 DT |
2855 | if (!mci) |
2856 | goto err_exit; | |
2857 | ||
2858 | mci->pvt_info = pvt; | |
2859 | ||
2860 | mci->dev = &pvt->dram_f2_ctl->dev; | |
2861 | amd64_setup_mci_misc_attributes(mci); | |
2862 | ||
2863 | if (amd64_init_csrows(mci)) | |
2864 | mci->edac_cap = EDAC_FLAG_NONE; | |
2865 | ||
2866 | amd64_enable_ecc_error_reporting(mci); | |
2867 | amd64_set_mc_sysfs_attributes(mci); | |
2868 | ||
2869 | ret = -ENODEV; | |
2870 | if (edac_mc_add_mc(mci)) { | |
2871 | debugf1("failed edac_mc_add_mc()\n"); | |
2872 | goto err_add_mc; | |
2873 | } | |
2874 | ||
2875 | mci_lookup[node_id] = mci; | |
2876 | pvt_lookup[node_id] = NULL; | |
549d042d BP |
2877 | |
2878 | /* register stuff with EDAC MCE */ | |
2879 | if (report_gart_errors) | |
2880 | amd_report_gart_errors(true); | |
2881 | ||
2882 | amd_register_ecc_decoder(amd64_decode_bus_error); | |
2883 | ||
7d6034d3 DT |
2884 | return 0; |
2885 | ||
2886 | err_add_mc: | |
2887 | edac_mc_free(mci); | |
2888 | ||
2889 | err_exit: | |
2890 | debugf0("failure to init 2nd stage: ret=%d\n", ret); | |
2891 | ||
2892 | amd64_restore_ecc_error_reporting(pvt); | |
2893 | ||
2894 | if (boot_cpu_data.x86 > 0xf) | |
2895 | amd64_teardown(pvt); | |
2896 | ||
2897 | amd64_free_mc_sibling_devices(pvt); | |
2898 | ||
2899 | kfree(pvt_lookup[pvt->mc_node_id]); | |
2900 | pvt_lookup[node_id] = NULL; | |
2901 | ||
2902 | return ret; | |
2903 | } | |
2904 | ||
2905 | ||
2906 | static int __devinit amd64_init_one_instance(struct pci_dev *pdev, | |
2907 | const struct pci_device_id *mc_type) | |
2908 | { | |
2909 | int ret = 0; | |
2910 | ||
37da0450 | 2911 | debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev), |
7d6034d3 DT |
2912 | get_amd_family_name(mc_type->driver_data)); |
2913 | ||
2914 | ret = pci_enable_device(pdev); | |
2915 | if (ret < 0) | |
2916 | ret = -EIO; | |
2917 | else | |
2918 | ret = amd64_probe_one_instance(pdev, mc_type->driver_data); | |
2919 | ||
2920 | if (ret < 0) | |
2921 | debugf0("ret=%d\n", ret); | |
2922 | ||
2923 | return ret; | |
2924 | } | |
2925 | ||
2926 | static void __devexit amd64_remove_one_instance(struct pci_dev *pdev) | |
2927 | { | |
2928 | struct mem_ctl_info *mci; | |
2929 | struct amd64_pvt *pvt; | |
2930 | ||
2931 | /* Remove from EDAC CORE tracking list */ | |
2932 | mci = edac_mc_del_mc(&pdev->dev); | |
2933 | if (!mci) | |
2934 | return; | |
2935 | ||
2936 | pvt = mci->pvt_info; | |
2937 | ||
2938 | amd64_restore_ecc_error_reporting(pvt); | |
2939 | ||
2940 | if (boot_cpu_data.x86 > 0xf) | |
2941 | amd64_teardown(pvt); | |
2942 | ||
2943 | amd64_free_mc_sibling_devices(pvt); | |
2944 | ||
2945 | mci_lookup[pvt->mc_node_id] = NULL; | |
2946 | mci->pvt_info = NULL; | |
2947 | ||
2948 | kfree(pvt); /* free pvt only after dropping all references to it */ | |
2949 | ||
549d042d BP |
2950 | /* unregister from EDAC MCE */ |
2951 | amd_report_gart_errors(false); | |
2952 | amd_unregister_ecc_decoder(amd64_decode_bus_error); | |
2953 | ||
7d6034d3 DT |
2954 | /* Free the EDAC CORE resources */ |
2955 | edac_mc_free(mci); | |
2956 | } | |
2957 | ||
2958 | /* | |
2959 | * This table is part of the interface for binding drivers to PCI devices. The | |
2960 | * PCI core enumerates the devices present at boot and then queries this table | |
2961 | * to decide whether this driver handles a given device (sketch below). | |
2962 | */ | |
2963 | static const struct pci_device_id amd64_pci_table[] __devinitdata = { | |
2964 | { | |
2965 | .vendor = PCI_VENDOR_ID_AMD, | |
2966 | .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, | |
2967 | .subvendor = PCI_ANY_ID, | |
2968 | .subdevice = PCI_ANY_ID, | |
2969 | .class = 0, | |
2970 | .class_mask = 0, | |
2971 | .driver_data = K8_CPUS | |
2972 | }, | |
2973 | { | |
2974 | .vendor = PCI_VENDOR_ID_AMD, | |
2975 | .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM, | |
2976 | .subvendor = PCI_ANY_ID, | |
2977 | .subdevice = PCI_ANY_ID, | |
2978 | .class = 0, | |
2979 | .class_mask = 0, | |
2980 | .driver_data = F10_CPUS | |
2981 | }, | |
2982 | { | |
2983 | .vendor = PCI_VENDOR_ID_AMD, | |
2984 | .device = PCI_DEVICE_ID_AMD_11H_NB_DRAM, | |
2985 | .subvendor = PCI_ANY_ID, | |
2986 | .subdevice = PCI_ANY_ID, | |
2987 | .class = 0, | |
2988 | .class_mask = 0, | |
2989 | .driver_data = F11_CPUS | |
2990 | }, | |
2991 | {0, } | |
2992 | }; | |
2993 | MODULE_DEVICE_TABLE(pci, amd64_pci_table); | |
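/*
 * A minimal sketch (illustrative only, not part of this driver) of how the
 * PCI core's matching works against the table above. The helper name
 * amd64_family_of() is hypothetical; pci_match_id() is the real PCI core
 * API that walks an id table looking for a vendor/device match:
 */
static int amd64_family_of(struct pci_dev *pdev)
{
	const struct pci_device_id *id = pci_match_id(amd64_pci_table, pdev);

	/* .driver_data carries the family index (K8_CPUS, F10_CPUS, ...) */
	return id ? (int)id->driver_data : -ENODEV;
}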
2994 | ||
2995 | static struct pci_driver amd64_pci_driver = { | |
2996 | .name = EDAC_MOD_STR, | |
2997 | .probe = amd64_init_one_instance, | |
2998 | .remove = __devexit_p(amd64_remove_one_instance), | |
2999 | .id_table = amd64_pci_table, | |
3000 | }; | |
3001 | ||
3002 | static void amd64_setup_pci_device(void) | |
3003 | { | |
3004 | struct mem_ctl_info *mci; | |
3005 | struct amd64_pvt *pvt; | |
3006 | ||
3007 | if (amd64_ctl_pci) | |
3008 | return; | |
3009 | ||
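	/*
	 * Only one EDAC PCI control device is created for the whole system;
	 * anchor it at node 0's DRAM F2 device.
	 */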
3010 | mci = mci_lookup[0]; | |
3011 | if (mci) { | |
3012 | ||
3013 | pvt = mci->pvt_info; | |
3014 | amd64_ctl_pci = | |
3015 | edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev, | |
3016 | EDAC_MOD_STR); | |
3017 | ||
3018 | if (!amd64_ctl_pci) { | |
3019 | pr_warning("%s(): Unable to create PCI control\n", | |
3020 | __func__); | |
3021 | ||
3022 | pr_warning("%s(): PCI error report via EDAC not set\n", | |
3023 | __func__); | |
3024 | } | |
3025 | } | |
3026 | } | |
3027 | ||
3028 | static int __init amd64_edac_init(void) | |
3029 | { | |
3030 | int nb, err = -ENODEV; | |
3031 | ||
3032 | edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n"); | |
3033 | ||
3034 | opstate_init(); | |
3035 | ||
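	/* Cache the northbridge devices before probing; bail out on failure */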
3036 | if (cache_k8_northbridges() < 0) | |
a3c4c580 | 3037 | return err; |
7d6034d3 DT |
3038 | |
3039 | err = pci_register_driver(&amd64_pci_driver); | |
3040 | if (err) | |
3041 | return err; | |
3042 | ||
3043 | /* | |
3044 | * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd | |
3045 | * amd64_pvt structs. These will be used in the 2nd stage init function | |
3046 | * to finish initialization of the MC instances. | |
3047 | */ | |
3048 | for (nb = 0; nb < num_k8_northbridges; nb++) { | |
3049 | if (!pvt_lookup[nb]) | |
3050 | continue; | |
3051 | ||
3052 | err = amd64_init_2nd_stage(pvt_lookup[nb]); | |
3053 | if (err) | |
37da0450 | 3054 | goto err_2nd_stage; |
7d6034d3 DT |
3055 | } |
3056 | ||
3057 | amd64_setup_pci_device(); | |
3058 | ||
3059 | return 0; | |
3060 | ||
37da0450 BP |
3061 | err_2nd_stage: |
3062 | debugf0("2nd stage failed\n"); | |
7d6034d3 DT |
3063 | pci_unregister_driver(&amd64_pci_driver); |
3064 | ||
3065 | return err; | |
3066 | } | |
3067 | ||
3068 | static void __exit amd64_edac_exit(void) | |
3069 | { | |
3070 | if (amd64_ctl_pci) | |
3071 | edac_pci_release_generic_ctl(amd64_ctl_pci); | |
3072 | ||
3073 | pci_unregister_driver(&amd64_pci_driver); | |
3074 | } | |
3075 | ||
3076 | module_init(amd64_edac_init); | |
3077 | module_exit(amd64_edac_exit); | |
3078 | ||
3079 | MODULE_LICENSE("GPL"); | |
3080 | MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, " | |
3081 | "Dave Peterson, Thayne Harbaugh"); | |
3082 | MODULE_DESCRIPTION("MC support for AMD64 memory controllers - " | |
3083 | EDAC_AMD64_VERSION); | |
3084 | ||
3085 | module_param(edac_op_state, int, 0444); | |
3086 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); |
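/*
 * Example invocation (values are illustrative): load the driver with GART
 * error reporting enabled and polling-based error collection:
 *
 *   modprobe amd64_edac report_gart_errors=1 edac_op_state=0
 */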