amd64_edac: Don't pass driver name as an error parameter
drivers/edac/amd64_edac.c
1 #include "amd64_edac.h"
2 #include <asm/amd_nb.h>
3
4 static struct edac_pci_ctl_info *amd64_ctl_pci;
5
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
8
9 /*
10 * Set by command line parameter. If BIOS has enabled the ECC, this override is
11 * cleared to prevent re-enabling the hardware by this driver.
12 */
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
15
16 static struct msr __percpu *msrs;
17
18 /*
19 * count successfully initialized driver instances for setup_pci_device()
20 */
21 static atomic_t drv_instances = ATOMIC_INIT(0);
22
23 /* Per-node driver instances */
24 static struct mem_ctl_info **mcis;
25 static struct ecc_settings **ecc_stngs;
26
27 /*
28 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
29 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
30 * or higher value'.
31 *
32 * FIXME: Produce a better mapping/linearisation.
33 */
34 struct scrubrate {
35 u32 scrubval; /* bit pattern for scrub rate */
36 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
37 } scrubrates[] = {
38 { 0x01, 1600000000UL},
39 { 0x02, 800000000UL},
40 { 0x03, 400000000UL},
41 { 0x04, 200000000UL},
42 { 0x05, 100000000UL},
43 { 0x06, 50000000UL},
44 { 0x07, 25000000UL},
45 { 0x08, 12284069UL},
46 { 0x09, 6274509UL},
47 { 0x0A, 3121951UL},
48 { 0x0B, 1560975UL},
49 { 0x0C, 781440UL},
50 { 0x0D, 390720UL},
51 { 0x0E, 195300UL},
52 { 0x0F, 97650UL},
53 { 0x10, 48854UL},
54 { 0x11, 24427UL},
55 { 0x12, 12213UL},
56 { 0x13, 6101UL},
57 { 0x14, 3051UL},
58 { 0x15, 1523UL},
59 { 0x16, 761UL},
60 { 0x00, 0UL}, /* scrubbing off */
61 };
62
63 static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
64 u32 *val, const char *func)
65 {
66 int err = 0;
67
68 err = pci_read_config_dword(pdev, offset, val);
69 if (err)
70 amd64_warn("%s: error reading F%dx%03x.\n",
71 func, PCI_FUNC(pdev->devfn), offset);
72
73 return err;
74 }
75
76 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
77 u32 val, const char *func)
78 {
79 int err = 0;
80
81 err = pci_write_config_dword(pdev, offset, val);
82 if (err)
83 amd64_warn("%s: error writing to F%dx%03x.\n",
84 func, PCI_FUNC(pdev->devfn), offset);
85
86 return err;
87 }
88
89 /*
90 *
91 * Depending on the family, F2 DCT reads need special handling:
92 *
93 * K8: has a single DCT only
94 *
95 * F10h: each DCT has its own set of regs
96 * DCT0 -> F2x040..
97 * DCT1 -> F2x140..
98 *
99 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
100 *
101 */
102 static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
103 const char *func)
104 {
105 if (addr >= 0x100)
106 return -EINVAL;
107
108 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
109 }
110
111 static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
112 const char *func)
113 {
114 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
115 }
116
117 /*
118 * Select DCT to which PCI cfg accesses are routed
119 */
120 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
121 {
122 u32 reg = 0;
123
124 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
125 reg &= 0xfffffffe;
126 reg |= dct;
127 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
128 }
129
130 static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
131 const char *func)
132 {
133 u8 dct = 0;
134
135 if (addr >= 0x140 && addr <= 0x1a0) {
136 dct = 1;
137 addr -= 0x100;
138 }
139
140 f15h_select_dct(pvt, dct);
141
142 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
143 }
144
145 /*
146 * Memory scrubber control interface. For K8, memory scrubbing is handled by
147 * hardware and can involve L2 cache, dcache as well as the main memory. With
148 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
149 * functionality.
150 *
151 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
152 * (dram) to cache lines. This is nasty, so we will use bandwidth in
153 * bytes/sec for the setting.
154 *
155 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
156 * other archs, we might not have access to the caches directly.
157 */
158
159 /*
160 * Scan the scrub rate mapping table for a close or matching bandwidth value.
161 * If the requested rate is too big, use the last maximum value found.
162 */
163 static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
164 {
165 u32 scrubval;
166 int i;
167
168 /*
169 * map the configured rate (new_bw) to a value specific to the AMD64
170 * memory controller and apply it to the register. Search for the first
171 * bandwidth entry that is greater than or equal to the requested setting
172 * and program that. If at the last entry, turn off DRAM scrubbing.
173 */
174 for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
175 /*
176 * skip scrub rates which aren't recommended
177 * (see F10 BKDG, F3x58)
178 */
179 if (scrubrates[i].scrubval < min_rate)
180 continue;
181
182 if (scrubrates[i].bandwidth <= new_bw)
183 break;
184
185 /*
186 * if no suitable bandwidth found, turn off DRAM scrubbing
187 * entirely by falling back to the last element in the
188 * scrubrates array.
189 */
190 }
191
192 scrubval = scrubrates[i].scrubval;
193
194 pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
195
196 if (scrubval)
197 return scrubrates[i].bandwidth;
198
199 return 0;
200 }
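/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * bandwidth-to-scrubval walk above, restated over a caller-supplied table
 * with the "scrubbing off" entry as an explicit fallback.  With the
 * scrubrates[] table and min_rate 0x5 (F10h and later), a request of
 * new_bw = 3000000 bytes/sec skips scrubvals 0x01..0x04, rejects every
 * entry faster than the request and stops at { 0x0B, 1560975 }, i.e.
 * example_pick_scrubval(scrubrates, ARRAY_SIZE(scrubrates), 3000000, 0x5)
 * would return 0x0B.
 */
static inline u32 example_pick_scrubval(const struct scrubrate *tbl, int n,
					u32 new_bw, u32 min_rate)
{
	int i;

	for (i = 0; i < n - 1; i++) {
		if (tbl[i].scrubval < min_rate)
			continue;		/* rate not recommended */

		if (tbl[i].bandwidth <= new_bw)
			break;			/* first rate <= request */
	}

	/* if nothing matched, tbl[n - 1] turns scrubbing off */
	return tbl[i].scrubval;
}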
201
202 static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
203 {
204 struct amd64_pvt *pvt = mci->pvt_info;
205 u32 min_scrubrate = 0x5;
206
207 if (boot_cpu_data.x86 == 0xf)
208 min_scrubrate = 0x0;
209
210 /* F15h Erratum #505 */
211 if (boot_cpu_data.x86 == 0x15)
212 f15h_select_dct(pvt, 0);
213
214 return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
215 }
216
217 static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
218 {
219 struct amd64_pvt *pvt = mci->pvt_info;
220 u32 scrubval = 0;
221 int i, retval = -EINVAL;
222
223 /* F15h Erratum #505 */
224 if (boot_cpu_data.x86 == 0x15)
225 f15h_select_dct(pvt, 0);
226
227 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
228
229 scrubval = scrubval & 0x001F;
230
231 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
232 if (scrubrates[i].scrubval == scrubval) {
233 retval = scrubrates[i].bandwidth;
234 break;
235 }
236 }
237 return retval;
238 }
239
240 /*
241 * returns true if the SysAddr given by sys_addr matches the
242 * DRAM base/limit associated with node_id
243 */
244 static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
245 unsigned nid)
246 {
247 u64 addr;
248
249 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
250 * all ones if the most significant implemented address bit is 1.
251 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
252 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
253 * Application Programming.
254 */
255 addr = sys_addr & 0x000000ffffffffffull;
256
257 return ((addr >= get_dram_base(pvt, nid)) &&
258 (addr <= get_dram_limit(pvt, nid)));
259 }
260
261 /*
262 * Attempt to map a SysAddr to a node. On success, return a pointer to the
263 * mem_ctl_info structure for the node that the SysAddr maps to.
264 *
265 * On failure, return NULL.
266 */
267 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
268 u64 sys_addr)
269 {
270 struct amd64_pvt *pvt;
271 unsigned node_id;
272 u32 intlv_en, bits;
273
274 /*
275 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
276 * 3.4.4.2) registers to map the SysAddr to a node ID.
277 */
278 pvt = mci->pvt_info;
279
280 /*
281 * The value of this field should be the same for all DRAM Base
282 * registers. Therefore we arbitrarily choose to read it from the
283 * register for node 0.
284 */
285 intlv_en = dram_intlv_en(pvt, 0);
286
287 if (intlv_en == 0) {
288 for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
289 if (amd64_base_limit_match(pvt, sys_addr, node_id))
290 goto found;
291 }
292 goto err_no_match;
293 }
294
295 if (unlikely((intlv_en != 0x01) &&
296 (intlv_en != 0x03) &&
297 (intlv_en != 0x07))) {
298 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
299 return NULL;
300 }
301
302 bits = (((u32) sys_addr) >> 12) & intlv_en;
303
304 for (node_id = 0; ; ) {
305 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
306 break; /* intlv_sel field matches */
307
308 if (++node_id >= DRAM_RANGES)
309 goto err_no_match;
310 }
311
312 /* sanity test for sys_addr */
313 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
314 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
315 "range for node %d with node interleaving enabled.\n",
316 __func__, sys_addr, node_id);
317 return NULL;
318 }
319
320 found:
321 return edac_mc_find((int)node_id);
322
323 err_no_match:
324 edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
325 (unsigned long)sys_addr);
326
327 return NULL;
328 }
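/*
 * Illustrative sketch (hypothetical helper, not part of the driver): with
 * node interleaving enabled, the SysAddr bits just above the 4K page offset
 * are compared against each node's IntlvSel field.  E.g. for a four-node
 * interleave (intlv_en = 0x03), SysAddr 0x2000 yields select bits 0b10, so
 * the node whose IntlvSel is 2 claims it (subject to the base/limit sanity
 * check above).
 */
static inline bool example_node_claims_sysaddr(u64 sys_addr, u32 intlv_en,
					       u32 intlv_sel)
{
	u32 bits = (((u32)sys_addr) >> 12) & intlv_en;

	return (intlv_sel & intlv_en) == bits;
}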
329
330 /*
331 * compute the CS base address of the @csrow on the DRAM controller @dct.
332 * For details see F2x[5C:40] in the processor's BKDG
333 */
334 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
335 u64 *base, u64 *mask)
336 {
337 u64 csbase, csmask, base_bits, mask_bits;
338 u8 addr_shift;
339
340 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
341 csbase = pvt->csels[dct].csbases[csrow];
342 csmask = pvt->csels[dct].csmasks[csrow];
343 base_bits = GENMASK(21, 31) | GENMASK(9, 15);
344 mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
345 addr_shift = 4;
346 } else {
347 csbase = pvt->csels[dct].csbases[csrow];
348 csmask = pvt->csels[dct].csmasks[csrow >> 1];
349 addr_shift = 8;
350
351 if (boot_cpu_data.x86 == 0x15)
352 base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
353 else
354 base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
355 }
356
357 *base = (csbase & base_bits) << addr_shift;
358
359 *mask = ~0ULL;
360 /* poke holes for the csmask */
361 *mask &= ~(mask_bits << addr_shift);
362 /* OR them in */
363 *mask |= (csmask & mask_bits) << addr_shift;
364 }
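/*
 * Illustrative sketch (hypothetical helper, toy numbers): the "poke holes,
 * then OR them in" sequence above replaces the maskable bit positions of an
 * all-ones word with the DCS mask bits, so the caller's later '~mask' keeps
 * exactly the address bits that must match the base.  E.g. with a toy field
 * of bits [7:4] (mask_bits = 0xf0, addr_shift = 0) and csmask = 0x30, the
 * result is ~0 with bits [7:4] replaced by 0011b: after '~mask' only bits 7
 * and 6 are compared and bits 5 and 4 are don't-care.
 */
static inline u64 example_merge_cs_mask(u64 csmask, u64 mask_bits, u8 addr_shift)
{
	u64 mask = ~0ULL;

	mask &= ~(mask_bits << addr_shift);		/* poke the holes */
	mask |= (csmask & mask_bits) << addr_shift;	/* fill them from DCSM */

	return mask;
}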
365
366 #define for_each_chip_select(i, dct, pvt) \
367 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
368
369 #define chip_select_base(i, dct, pvt) \
370 pvt->csels[dct].csbases[i]
371
372 #define for_each_chip_select_mask(i, dct, pvt) \
373 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
374
375 /*
376 * @input_addr is an InputAddr associated with the node given by mci. Return the
377 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
378 */
379 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
380 {
381 struct amd64_pvt *pvt;
382 int csrow;
383 u64 base, mask;
384
385 pvt = mci->pvt_info;
386
387 for_each_chip_select(csrow, 0, pvt) {
388 if (!csrow_enabled(csrow, 0, pvt))
389 continue;
390
391 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
392
393 mask = ~mask;
394
395 if ((input_addr & mask) == (base & mask)) {
396 edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
397 (unsigned long)input_addr, csrow,
398 pvt->mc_node_id);
399
400 return csrow;
401 }
402 }
403 edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
404 (unsigned long)input_addr, pvt->mc_node_id);
405
406 return -1;
407 }
408
409 /*
410 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
411 * for the node represented by mci. Info is passed back in *hole_base,
412 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
413 * info is invalid. Info may be invalid for either of the following reasons:
414 *
415 * - The revision of the node is not E or greater. In this case, the DRAM Hole
416 * Address Register does not exist.
417 *
418 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
419 * indicating that its contents are not valid.
420 *
421 * The values passed back in *hole_base, *hole_offset, and *hole_size are
422 * complete 32-bit values despite the fact that the bitfields in the DHAR
423 * only represent bits 31-24 of the base and offset values.
424 */
425 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
426 u64 *hole_offset, u64 *hole_size)
427 {
428 struct amd64_pvt *pvt = mci->pvt_info;
429 u64 base;
430
431 /* only revE and later have the DRAM Hole Address Register */
432 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
433 edac_dbg(1, " revision %d for node %d does not support DHAR\n",
434 pvt->ext_model, pvt->mc_node_id);
435 return 1;
436 }
437
438 /* valid for Fam10h and above */
439 if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
440 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
441 return 1;
442 }
443
444 if (!dhar_valid(pvt)) {
445 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
446 pvt->mc_node_id);
447 return 1;
448 }
449
450 /* This node has Memory Hoisting */
451
452 /* +------------------+--------------------+--------------------+-----
453 * | memory | DRAM hole | relocated |
454 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
455 * | | | DRAM hole |
456 * | | | [0x100000000, |
457 * | | | (0x100000000+ |
458 * | | | (0xffffffff-x))] |
459 * +------------------+--------------------+--------------------+-----
460 *
461 * Above is a diagram of physical memory showing the DRAM hole and the
462 * relocated addresses from the DRAM hole. As shown, the DRAM hole
463 * starts at address x (the base address) and extends through address
464 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
465 * addresses in the hole so that they start at 0x100000000.
466 */
467
468 base = dhar_base(pvt);
469
470 *hole_base = base;
471 *hole_size = (0x1ull << 32) - base;
472
473 if (boot_cpu_data.x86 > 0xf)
474 *hole_offset = f10_dhar_offset(pvt);
475 else
476 *hole_offset = k8_dhar_offset(pvt);
477
478 edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
479 pvt->mc_node_id, (unsigned long)*hole_base,
480 (unsigned long)*hole_offset, (unsigned long)*hole_size);
481
482 return 0;
483 }
484 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
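/*
 * Worked example (illustrative numbers only): for a DRAM hole based at
 * 0xc0000000 the code above reports
 *
 *	hole_base = 0xc0000000
 *	hole_size = 0x100000000 - 0xc0000000 = 0x40000000
 *
 * i.e. the top 1G below the 4G boundary is the hole, and DRAM that would
 * have lived in [0xc0000000, 0xffffffff] is accessed at
 * [0x100000000, 0x13fffffff] instead.  Hypothetical helper for the size
 * arithmetic only:
 */
static inline u64 example_dram_hole_size(u64 hole_base)
{
	return (0x1ull << 32) - hole_base;	/* hole extends up to 4G */
}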
485
486 /*
487 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
488 * assumed that sys_addr maps to the node given by mci.
489 *
490 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
491 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
492 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
493 * then it is also involved in translating a SysAddr to a DramAddr. Sections
494 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
495 * These parts of the documentation are unclear. I interpret them as follows:
496 *
497 * When node n receives a SysAddr, it processes the SysAddr as follows:
498 *
499 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
500 * Limit registers for node n. If the SysAddr is not within the range
501 * specified by the base and limit values, then node n ignores the Sysaddr
502 * (since it does not map to node n). Otherwise continue to step 2 below.
503 *
504 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
505 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
506 * the range of relocated addresses (starting at 0x100000000) from the DRAM
507 * hole. If not, skip to step 3 below. Else get the value of the
508 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
509 * offset defined by this value from the SysAddr.
510 *
511 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
512 * Base register for node n. To obtain the DramAddr, subtract the base
513 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
514 */
515 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
516 {
517 struct amd64_pvt *pvt = mci->pvt_info;
518 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
519 int ret = 0;
520
521 dram_base = get_dram_base(pvt, pvt->mc_node_id);
522
523 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
524 &hole_size);
525 if (!ret) {
526 if ((sys_addr >= (1ull << 32)) &&
527 (sys_addr < ((1ull << 32) + hole_size))) {
528 /* use DHAR to translate SysAddr to DramAddr */
529 dram_addr = sys_addr - hole_offset;
530
531 edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
532 (unsigned long)sys_addr,
533 (unsigned long)dram_addr);
534
535 return dram_addr;
536 }
537 }
538
539 /*
540 * Translate the SysAddr to a DramAddr as shown near the start of
541 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
542 * only deals with 40-bit values. Therefore we discard bits 63-40 of
543 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
544 * discard are all 1s. Otherwise the bits we discard are all 0s. See
545 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
546 * Programmer's Manual Volume 1 Application Programming.
547 */
548 dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
549
550 edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
551 (unsigned long)sys_addr, (unsigned long)dram_addr);
552 return dram_addr;
553 }
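/*
 * Worked example (illustrative numbers, no DRAM hole): a node whose DRAM
 * base is 0x100000000 receiving SysAddr 0x123456789 computes
 *
 *	DramAddr = 0x123456789 - 0x100000000 = 0x23456789
 *
 * The hypothetical helper below redoes only the non-hoisted branch above;
 * the explicit 40-bit mask is equivalent to the GENMASK(0, 39) used there.
 */
static inline u64 example_sys_to_dram_addr(u64 sys_addr, u64 dram_base)
{
	/* keep bits [39:0] only, then subtract the node's DRAM base */
	return (sys_addr & ((1ull << 40) - 1)) - dram_base;
}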
554
555 /*
556 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
557 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
558 * for node interleaving.
559 */
560 static int num_node_interleave_bits(unsigned intlv_en)
561 {
562 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
563 int n;
564
565 BUG_ON(intlv_en > 7);
566 n = intlv_shift_table[intlv_en];
567 return n;
568 }
569
570 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
571 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
572 {
573 struct amd64_pvt *pvt;
574 int intlv_shift;
575 u64 input_addr;
576
577 pvt = mci->pvt_info;
578
579 /*
580 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
581 * concerning translating a DramAddr to an InputAddr.
582 */
583 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
584 input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
585 (dram_addr & 0xfff);
586
587 edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
588 intlv_shift, (unsigned long)dram_addr,
589 (unsigned long)input_addr);
590
591 return input_addr;
592 }
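/*
 * Worked example (illustrative numbers): with two-node interleaving
 * (intlv_en = 1, hence intlv_shift = 1), DramAddr 0x5000 drops its node
 * interleave bit at position 12:
 *
 *	((0x5000 >> 1) & bits [35:12]) + (0x5000 & 0xfff) = 0x2000
 *
 * i.e. InputAddr 0x2000.  Hypothetical helper with an explicit mask:
 */
static inline u64 example_dram_to_input_addr(u64 dram_addr, int intlv_shift)
{
	u64 bits_35_12 = ((1ull << 36) - 1) & ~0xfffull;

	return ((dram_addr >> intlv_shift) & bits_35_12) + (dram_addr & 0xfff);
}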
593
594 /*
595 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
596 * assumed that @sys_addr maps to the node given by mci.
597 */
598 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
599 {
600 u64 input_addr;
601
602 input_addr =
603 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
604
605 edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
606 (unsigned long)sys_addr, (unsigned long)input_addr);
607
608 return input_addr;
609 }
610
611
612 /*
613 * @input_addr is an InputAddr associated with the node represented by mci.
614 * Translate @input_addr to a DramAddr and return the result.
615 */
616 static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
617 {
618 struct amd64_pvt *pvt;
619 unsigned node_id, intlv_shift;
620 u64 bits, dram_addr;
621 u32 intlv_sel;
622
623 /*
624 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
625 * shows how to translate a DramAddr to an InputAddr. Here we reverse
626 * this procedure. When translating from a DramAddr to an InputAddr, the
627 * bits used for node interleaving are discarded. Here we recover these
628 * bits from the IntlvSel field of the DRAM Limit register (section
629 * 3.4.4.2) for the node that input_addr is associated with.
630 */
631 pvt = mci->pvt_info;
632 node_id = pvt->mc_node_id;
633
634 BUG_ON(node_id > 7);
635
636 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
637 if (intlv_shift == 0) {
638 edac_dbg(1, " InputAddr 0x%lx translates to DramAddr of same value\n",
639 (unsigned long)input_addr);
640
641 return input_addr;
642 }
643
644 bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
645 (input_addr & 0xfff);
646
647 intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
648 dram_addr = bits + (intlv_sel << 12);
649
650 edac_dbg(1, "InputAddr 0x%lx translates to DramAddr 0x%lx (%d node interleave bits)\n",
651 (unsigned long)input_addr,
652 (unsigned long)dram_addr, intlv_shift);
653
654 return dram_addr;
655 }
656
657 /*
658 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
659 * @dram_addr to a SysAddr.
660 */
661 static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
662 {
663 struct amd64_pvt *pvt = mci->pvt_info;
664 u64 hole_base, hole_offset, hole_size, base, sys_addr;
665 int ret = 0;
666
667 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
668 &hole_size);
669 if (!ret) {
670 if ((dram_addr >= hole_base) &&
671 (dram_addr < (hole_base + hole_size))) {
672 sys_addr = dram_addr + hole_offset;
673
674 edac_dbg(1, "using DHAR to translate DramAddr 0x%lx to SysAddr 0x%lx\n",
675 (unsigned long)dram_addr,
676 (unsigned long)sys_addr);
677
678 return sys_addr;
679 }
680 }
681
682 base = get_dram_base(pvt, pvt->mc_node_id);
683 sys_addr = dram_addr + base;
684
685 /*
686 * The sys_addr we have computed up to this point is a 40-bit value
687 * because the k8 deals with 40-bit values. However, the value we are
688 * supposed to return is a full 64-bit physical address. The AMD
689 * x86-64 architecture specifies that the most significant implemented
690 * address bit through bit 63 of a physical address must be either all
691 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
692 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
693 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
694 * Programming.
695 */
696 sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
697
698 edac_dbg(1, " Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
699 pvt->mc_node_id, (unsigned long)dram_addr,
700 (unsigned long)sys_addr);
701
702 return sys_addr;
703 }
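/*
 * Worked example (illustrative numbers): the sign extension above replicates
 * bit 39 into bits [63:40], so the 40-bit value 0x8000000000 becomes
 * 0xffffff8000000000 while a value with bit 39 clear is left unchanged
 * (the subtraction yields ~0 and its complement is 0).  Hypothetical helper:
 */
static inline u64 example_sign_extend_bit39(u64 sys_addr)
{
	return sys_addr | ~((sys_addr & (1ull << 39)) - 1);
}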
704
705 /*
706 * @input_addr is an InputAddr associated with the node given by mci. Translate
707 * @input_addr to a SysAddr.
708 */
709 static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
710 u64 input_addr)
711 {
712 return dram_addr_to_sys_addr(mci,
713 input_addr_to_dram_addr(mci, input_addr));
714 }
715
716 /* Map the Error address to a PAGE and PAGE OFFSET. */
717 static inline void error_address_to_page_and_offset(u64 error_address,
718 u32 *page, u32 *offset)
719 {
720 *page = (u32) (error_address >> PAGE_SHIFT);
721 *offset = ((u32) error_address) & ~PAGE_MASK;
722 }
723
724 /*
725 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
726 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
727 * of a node that detected an ECC memory error. mci represents the node that
728 * the error address maps to (possibly different from the node that detected
729 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
730 * error.
731 */
732 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
733 {
734 int csrow;
735
736 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
737
738 if (csrow == -1)
739 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
740 "address 0x%lx\n", (unsigned long)sys_addr);
741 return csrow;
742 }
743
744 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
745
746 /*
747 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
748 * are ECC capable.
749 */
750 static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
751 {
752 u8 bit;
753 unsigned long edac_cap = EDAC_FLAG_NONE;
754
755 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
756 ? 19
757 : 17;
758
759 if (pvt->dclr0 & BIT(bit))
760 edac_cap = EDAC_FLAG_SECDED;
761
762 return edac_cap;
763 }
764
765 static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
766
767 static void amd64_dump_dramcfg_low(u32 dclr, int chan)
768 {
769 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
770
771 edac_dbg(1, " DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
772 (dclr & BIT(16)) ? "un" : "",
773 (dclr & BIT(19)) ? "yes" : "no");
774
775 edac_dbg(1, " PAR/ERR parity: %s\n",
776 (dclr & BIT(8)) ? "enabled" : "disabled");
777
778 if (boot_cpu_data.x86 == 0x10)
779 edac_dbg(1, " DCT 128bit mode width: %s\n",
780 (dclr & BIT(11)) ? "128b" : "64b");
781
782 edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
783 (dclr & BIT(12)) ? "yes" : "no",
784 (dclr & BIT(13)) ? "yes" : "no",
785 (dclr & BIT(14)) ? "yes" : "no",
786 (dclr & BIT(15)) ? "yes" : "no");
787 }
788
789 /* Display and decode various NB registers for debug purposes. */
790 static void dump_misc_regs(struct amd64_pvt *pvt)
791 {
792 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
793
794 edac_dbg(1, " NB two channel DRAM capable: %s\n",
795 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
796
797 edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
798 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
799 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
800
801 amd64_dump_dramcfg_low(pvt->dclr0, 0);
802
803 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
804
805 edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
806 pvt->dhar, dhar_base(pvt),
807 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
808 : f10_dhar_offset(pvt));
809
810 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
811
812 amd64_debug_display_dimm_sizes(pvt, 0);
813
814 /* everything below this point is Fam10h and above */
815 if (boot_cpu_data.x86 == 0xf)
816 return;
817
818 amd64_debug_display_dimm_sizes(pvt, 1);
819
820 amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
821
822 /* Only if NOT ganged does dclr1 have valid info */
823 if (!dct_ganging_enabled(pvt))
824 amd64_dump_dramcfg_low(pvt->dclr1, 1);
825 }
826
827 /*
828 * See BKDG, F2x[1,0][5C:40], F2x[1,0][6C:60]
829 */
830 static void prep_chip_selects(struct amd64_pvt *pvt)
831 {
832 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
833 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
834 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
835 } else {
836 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
837 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
838 }
839 }
840
841 /*
842 * Function 2, Offset DCSB0: read in the DCS Base and DCS Mask registers
843 */
844 static void read_dct_base_mask(struct amd64_pvt *pvt)
845 {
846 int cs;
847
848 prep_chip_selects(pvt);
849
850 for_each_chip_select(cs, 0, pvt) {
851 int reg0 = DCSB0 + (cs * 4);
852 int reg1 = DCSB1 + (cs * 4);
853 u32 *base0 = &pvt->csels[0].csbases[cs];
854 u32 *base1 = &pvt->csels[1].csbases[cs];
855
856 if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
857 edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
858 cs, *base0, reg0);
859
860 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
861 continue;
862
863 if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
864 edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
865 cs, *base1, reg1);
866 }
867
868 for_each_chip_select_mask(cs, 0, pvt) {
869 int reg0 = DCSM0 + (cs * 4);
870 int reg1 = DCSM1 + (cs * 4);
871 u32 *mask0 = &pvt->csels[0].csmasks[cs];
872 u32 *mask1 = &pvt->csels[1].csmasks[cs];
873
874 if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
875 edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
876 cs, *mask0, reg0);
877
878 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
879 continue;
880
881 if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
882 edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
883 cs, *mask1, reg1);
884 }
885 }
886
887 static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
888 {
889 enum mem_type type;
890
891 /* F15h supports only DDR3 */
892 if (boot_cpu_data.x86 >= 0x15)
893 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
894 else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
895 if (pvt->dchr0 & DDR3_MODE)
896 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
897 else
898 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
899 } else {
900 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
901 }
902
903 amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);
904
905 return type;
906 }
907
908 /* Get the number of DCT channels the memory controller is using. */
909 static int k8_early_channel_count(struct amd64_pvt *pvt)
910 {
911 int flag;
912
913 if (pvt->ext_model >= K8_REV_F)
914 /* RevF (NPT) and later */
915 flag = pvt->dclr0 & WIDTH_128;
916 else
917 /* RevE and earlier */
918 flag = pvt->dclr0 & REVE_WIDTH_128;
919
920 /* not used */
921 pvt->dclr1 = 0;
922
923 return (flag) ? 2 : 1;
924 }
925
926 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
927 static u64 get_error_address(struct mce *m)
928 {
929 struct cpuinfo_x86 *c = &boot_cpu_data;
930 u64 addr;
931 u8 start_bit = 1;
932 u8 end_bit = 47;
933
934 if (c->x86 == 0xf) {
935 start_bit = 3;
936 end_bit = 39;
937 }
938
939 addr = m->addr & GENMASK(start_bit, end_bit);
940
941 /*
942 * Erratum 637 workaround
943 */
944 if (c->x86 == 0x15) {
945 struct amd64_pvt *pvt;
946 u64 cc6_base, tmp_addr;
947 u32 tmp;
948 u8 mce_nid, intlv_en;
949
950 if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
951 return addr;
952
953 mce_nid = amd_get_nb_id(m->extcpu);
954 pvt = mcis[mce_nid]->pvt_info;
955
956 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
957 intlv_en = tmp >> 21 & 0x7;
958
959 /* add [47:27] + 3 trailing bits */
960 cc6_base = (tmp & GENMASK(0, 20)) << 3;
961
962 /* reverse and add DramIntlvEn */
963 cc6_base |= intlv_en ^ 0x7;
964
965 /* pin at [47:24] */
966 cc6_base <<= 24;
967
968 if (!intlv_en)
969 return cc6_base | (addr & GENMASK(0, 23));
970
971 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
972
973 /* faster log2 */
974 tmp_addr = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1);
975
976 /* OR DramIntlvSel into bits [14:12] */
977 tmp_addr |= (tmp & GENMASK(21, 23)) >> 9;
978
979 /* add remaining [11:0] bits from original MC4_ADDR */
980 tmp_addr |= addr & GENMASK(0, 11);
981
982 return cc6_base | tmp_addr;
983 }
984
985 return addr;
986 }
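/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * initial masking above keeps only the reported address field of MC4_ADDR,
 * bits [47:1] on F10h and later or [39:3] on K8.  The mask below is built
 * explicitly instead of via the GENMASK(lo, hi) convention used in this file.
 */
static inline u64 example_mc4_addr_field(u64 mc4_addr, u8 start_bit, u8 end_bit)
{
	u64 mask = ((1ull << (end_bit - start_bit + 1)) - 1) << start_bit;

	return mc4_addr & mask;
}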
987
988 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
989 {
990 struct cpuinfo_x86 *c = &boot_cpu_data;
991 int off = range << 3;
992
993 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
994 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
995
996 if (c->x86 == 0xf)
997 return;
998
999 if (!dram_rw(pvt, range))
1000 return;
1001
1002 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
1003 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1004
1005 /* Factor in CC6 save area by reading dst node's limit reg */
1006 if (c->x86 == 0x15) {
1007 struct pci_dev *f1 = NULL;
1008 u8 nid = dram_dst_node(pvt, range);
1009 u32 llim;
1010
1011 f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1));
1012 if (WARN_ON(!f1))
1013 return;
1014
1015 amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1016
1017 pvt->ranges[range].lim.lo &= GENMASK(0, 15);
1018
1019 /* {[39:27],111b} */
1020 pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
1021
1022 pvt->ranges[range].lim.hi &= GENMASK(0, 7);
1023
1024 /* [47:40] */
1025 pvt->ranges[range].lim.hi |= llim >> 13;
1026
1027 pci_dev_put(f1);
1028 }
1029 }
1030
1031 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1032 u16 syndrome)
1033 {
1034 struct mem_ctl_info *src_mci;
1035 struct amd64_pvt *pvt = mci->pvt_info;
1036 int channel, csrow;
1037 u32 page, offset;
1038
1039 error_address_to_page_and_offset(sys_addr, &page, &offset);
1040
1041 /*
1042 * Find out which node the error address belongs to. This may be
1043 * different from the node that detected the error.
1044 */
1045 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1046 if (!src_mci) {
1047 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1048 (unsigned long)sys_addr);
1049 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1050 page, offset, syndrome,
1051 -1, -1, -1,
1052 "failed to map error addr to a node",
1053 "",
1054 NULL);
1055 return;
1056 }
1057
1058 /* Now map the sys_addr to a CSROW */
1059 csrow = sys_addr_to_csrow(src_mci, sys_addr);
1060 if (csrow < 0) {
1061 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1062 page, offset, syndrome,
1063 -1, -1, -1,
1064 "failed to map error addr to a csrow",
1065 "",
1066 NULL);
1067 return;
1068 }
1069
1070 /* CHIPKILL enabled */
1071 if (pvt->nbcfg & NBCFG_CHIPKILL) {
1072 channel = get_channel_from_ecc_syndrome(mci, syndrome);
1073 if (channel < 0) {
1074 /*
1075 * Syndrome didn't map, so we don't know which of the
1076 * 2 DIMMs is in error. So we need to ID 'both' of them
1077 * as suspect.
1078 */
1079 amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - "
1080 "possible error reporting race\n",
1081 syndrome);
1082 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1083 page, offset, syndrome,
1084 csrow, -1, -1,
1085 "unknown syndrome - possible error reporting race",
1086 "",
1087 NULL);
1088 return;
1089 }
1090 } else {
1091 /*
1092 * non-chipkill ecc mode
1093 *
1094 * The k8 documentation is unclear about how to determine the
1095 * channel number when using non-chipkill memory. This method
1096 * was obtained from email communication with someone at AMD.
1097 * (Wish the email was placed in this comment - norsk)
1098 */
1099 channel = ((sys_addr & BIT(3)) != 0);
1100 }
1101
1102 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci,
1103 page, offset, syndrome,
1104 csrow, channel, -1,
1105 "", "", NULL);
1106 }
1107
1108 static int ddr2_cs_size(unsigned i, bool dct_width)
1109 {
1110 unsigned shift = 0;
1111
1112 if (i <= 2)
1113 shift = i;
1114 else if (!(i & 0x1))
1115 shift = i >> 1;
1116 else
1117 shift = (i + 1) >> 1;
1118
1119 return 128 << (shift + !!dct_width);
1120 }
1121
1122 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1123 unsigned cs_mode)
1124 {
1125 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1126
1127 if (pvt->ext_model >= K8_REV_F) {
1128 WARN_ON(cs_mode > 11);
1129 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1130 }
1131 else if (pvt->ext_model >= K8_REV_D) {
1132 unsigned diff;
1133 WARN_ON(cs_mode > 10);
1134
1135 /*
1136 * the below calculation, besides trying to win an obfuscated C
1137 * contest, maps cs_mode values to DIMM chip select sizes. The
1138 * mappings are:
1139 *
1140 * cs_mode CS size (MB)
1141 * ======= ============
1142 * 0 32
1143 * 1 64
1144 * 2 128
1145 * 3 128
1146 * 4 256
1147 * 5 512
1148 * 6 256
1149 * 7 512
1150 * 8 1024
1151 * 9 1024
1152 * 10 2048
1153 *
1154 * Basically, it calculates a value with which to shift the
1155 * smallest CS size of 32MB.
1156 *
1157 * ddr[23]_cs_size have a similar purpose.
1158 */
1159 diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1160
1161 return 32 << (cs_mode - diff);
1162 }
1163 else {
1164 WARN_ON(cs_mode > 6);
1165 return 32 << cs_mode;
1166 }
1167 }
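/*
 * Illustrative check (hypothetical helper, not part of the driver): the
 * revD/E formula above reproduces the table in the comment.  Walking
 * cs_mode from 0 to 10 gives 32, 64, 128, 128, 256, 512, 256, 512, 1024,
 * 1024 and 2048 MB, matching it entry for entry.
 */
static inline int example_k8_revde_cs_size(unsigned cs_mode)
{
	unsigned diff = cs_mode / 3 + (unsigned)(cs_mode > 5);

	return 32 << (cs_mode - diff);		/* chip select size in MB */
}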
1168
1169 /*
1170 * Get the number of DCT channels in use.
1171 *
1172 * Return:
1173 * number of Memory Channels in operation
1174 * Pass back:
1175 * contents of the DCL0_LOW register
1176 */
1177 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1178 {
1179 int i, j, channels = 0;
1180
1181 /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1182 if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
1183 return 2;
1184
1185 /*
1186 * Need to check if we are in unganged mode: in that case there are 2
1187 * channels, but they are not in 128 bit mode and thus the above 'dclr0'
1188 * status bit will be OFF.
1189 *
1190 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1191 * its CSEnable bit on. If so, it is the single DIMM case.
1192 */
1193 edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1194
1195 /*
1196 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1197 * is more than just one DIMM present in unganged mode. Need to check
1198 * both controllers since DIMMs can be placed in either one.
1199 */
1200 for (i = 0; i < 2; i++) {
1201 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1202
1203 for (j = 0; j < 4; j++) {
1204 if (DBAM_DIMM(j, dbam) > 0) {
1205 channels++;
1206 break;
1207 }
1208 }
1209 }
1210
1211 if (channels > 2)
1212 channels = 2;
1213
1214 amd64_info("MCT channel count: %d\n", channels);
1215
1216 return channels;
1217 }
1218
1219 static int ddr3_cs_size(unsigned i, bool dct_width)
1220 {
1221 unsigned shift = 0;
1222 int cs_size = 0;
1223
1224 if (i == 0 || i == 3 || i == 4)
1225 cs_size = -1;
1226 else if (i <= 2)
1227 shift = i;
1228 else if (i == 12)
1229 shift = 7;
1230 else if (!(i & 0x1))
1231 shift = i >> 1;
1232 else
1233 shift = (i + 1) >> 1;
1234
1235 if (cs_size != -1)
1236 cs_size = (128 * (1 << !!dct_width)) << shift;
1237
1238 return cs_size;
1239 }
1240
1241 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1242 unsigned cs_mode)
1243 {
1244 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1245
1246 WARN_ON(cs_mode > 11);
1247
1248 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1249 return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1250 else
1251 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1252 }
1253
1254 /*
1255 * F15h supports only 64bit DCT interfaces
1256 */
1257 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1258 unsigned cs_mode)
1259 {
1260 WARN_ON(cs_mode > 12);
1261
1262 return ddr3_cs_size(cs_mode, false);
1263 }
1264
1265 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1266 {
1267
1268 if (boot_cpu_data.x86 == 0xf)
1269 return;
1270
1271 if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1272 edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1273 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1274
1275 edac_dbg(0, " DCTs operate in %s mode\n",
1276 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1277
1278 if (!dct_ganging_enabled(pvt))
1279 edac_dbg(0, " Address range split per DCT: %s\n",
1280 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1281
1282 edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1283 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1284 (dct_memory_cleared(pvt) ? "yes" : "no"));
1285
1286 edac_dbg(0, " channel interleave: %s, "
1287 "interleave bits selector: 0x%x\n",
1288 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1289 dct_sel_interleave_addr(pvt));
1290 }
1291
1292 amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
1293 }
1294
1295 /*
1296 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1297 * Interleaving Modes.
1298 */
1299 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1300 bool hi_range_sel, u8 intlv_en)
1301 {
1302 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1303
1304 if (dct_ganging_enabled(pvt))
1305 return 0;
1306
1307 if (hi_range_sel)
1308 return dct_sel_high;
1309
1310 /*
1311 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1312 */
1313 if (dct_interleave_enabled(pvt)) {
1314 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1315
1316 /* return DCT select function: 0=DCT0, 1=DCT1 */
1317 if (!intlv_addr)
1318 return sys_addr >> 6 & 1;
1319
1320 if (intlv_addr & 0x2) {
1321 u8 shift = intlv_addr & 0x1 ? 9 : 6;
1322 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1323
1324 return ((sys_addr >> shift) & 1) ^ temp;
1325 }
1326
1327 return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1328 }
1329
1330 if (dct_high_range_enabled(pvt))
1331 return ~dct_sel_high & 1;
1332
1333 return 0;
1334 }
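/*
 * Worked example (illustrative): in the simplest interleaved case,
 * DctSelIntLvAddr = 0, address bit 6 picks the DCT, so consecutive 64-byte
 * lines alternate between the channels: SysAddr 0x1000 goes to DCT0 and
 * 0x1040 to DCT1.  Hypothetical helper for just that select:
 */
static inline u8 example_a6_channel_select(u64 sys_addr)
{
	return (sys_addr >> 6) & 1;
}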
1335
1336 /* Convert the sys_addr to the normalized DCT address */
1337 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
1338 u64 sys_addr, bool hi_rng,
1339 u32 dct_sel_base_addr)
1340 {
1341 u64 chan_off;
1342 u64 dram_base = get_dram_base(pvt, range);
1343 u64 hole_off = f10_dhar_offset(pvt);
1344 u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1345
1346 if (hi_rng) {
1347 /*
1348 * if
1349 * base address of high range is below 4Gb
1350 * (bits [47:27] at [31:11])
1351 * DRAM address space on this DCT is hoisted above 4Gb &&
1352 * sys_addr > 4Gb
1353 *
1354 * remove hole offset from sys_addr
1355 * else
1356 * remove high range offset from sys_addr
1357 */
1358 if ((!(dct_sel_base_addr >> 16) ||
1359 dct_sel_base_addr < dhar_base(pvt)) &&
1360 dhar_valid(pvt) &&
1361 (sys_addr >= BIT_64(32)))
1362 chan_off = hole_off;
1363 else
1364 chan_off = dct_sel_base_off;
1365 } else {
1366 /*
1367 * if
1368 * we have a valid hole &&
1369 * sys_addr > 4Gb
1370 *
1371 * remove hole
1372 * else
1373 * remove dram base to normalize to DCT address
1374 */
1375 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1376 chan_off = hole_off;
1377 else
1378 chan_off = dram_base;
1379 }
1380
1381 return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47));
1382 }
1383
1384 /*
1385 * Check if the csrow passed in is marked as SPARED; if so, return the new
1386 * spare row.
1387 */
1388 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1389 {
1390 int tmp_cs;
1391
1392 if (online_spare_swap_done(pvt, dct) &&
1393 csrow == online_spare_bad_dramcs(pvt, dct)) {
1394
1395 for_each_chip_select(tmp_cs, dct, pvt) {
1396 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1397 csrow = tmp_cs;
1398 break;
1399 }
1400 }
1401 }
1402 return csrow;
1403 }
1404
1405 /*
1406 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1407 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1408 *
1409 * Return:
1410 * -EINVAL: NOT FOUND
1411 * 0..csrow = Chip-Select Row
1412 */
1413 static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
1414 {
1415 struct mem_ctl_info *mci;
1416 struct amd64_pvt *pvt;
1417 u64 cs_base, cs_mask;
1418 int cs_found = -EINVAL;
1419 int csrow;
1420
1421 mci = mcis[nid];
1422 if (!mci)
1423 return cs_found;
1424
1425 pvt = mci->pvt_info;
1426
1427 edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1428
1429 for_each_chip_select(csrow, dct, pvt) {
1430 if (!csrow_enabled(csrow, dct, pvt))
1431 continue;
1432
1433 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1434
1435 edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1436 csrow, cs_base, cs_mask);
1437
1438 cs_mask = ~cs_mask;
1439
1440 edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1441 (in_addr & cs_mask), (cs_base & cs_mask));
1442
1443 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1444 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1445
1446 edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1447 break;
1448 }
1449 }
1450 return cs_found;
1451 }
1452
1453 /*
1454 * See F2x10C. Non-interleaved graphics framebuffer memory below the 16G boundary is
1455 * swapped with a region located at the bottom of memory so that the GPU can use
1456 * the interleaved region and thus two channels.
1457 */
1458 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1459 {
1460 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1461
1462 if (boot_cpu_data.x86 == 0x10) {
1463 /* only revC3 and revE have that feature */
1464 if (boot_cpu_data.x86_model < 4 ||
1465 (boot_cpu_data.x86_model < 0xa &&
1466 boot_cpu_data.x86_mask < 3))
1467 return sys_addr;
1468 }
1469
1470 amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
1471
1472 if (!(swap_reg & 0x1))
1473 return sys_addr;
1474
1475 swap_base = (swap_reg >> 3) & 0x7f;
1476 swap_limit = (swap_reg >> 11) & 0x7f;
1477 rgn_size = (swap_reg >> 20) & 0x7f;
1478 tmp_addr = sys_addr >> 27;
1479
1480 if (!(sys_addr >> 34) &&
1481 (((tmp_addr >= swap_base) &&
1482 (tmp_addr <= swap_limit)) ||
1483 (tmp_addr < rgn_size)))
1484 return sys_addr ^ (u64)swap_base << 27;
1485
1486 return sys_addr;
1487 }
1488
1489 /* For a given @dram_range, check if @sys_addr falls within it. */
1490 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1491 u64 sys_addr, int *nid, int *chan_sel)
1492 {
1493 int cs_found = -EINVAL;
1494 u64 chan_addr;
1495 u32 dct_sel_base;
1496 u8 channel;
1497 bool high_range = false;
1498
1499 u8 node_id = dram_dst_node(pvt, range);
1500 u8 intlv_en = dram_intlv_en(pvt, range);
1501 u32 intlv_sel = dram_intlv_sel(pvt, range);
1502
1503 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1504 range, sys_addr, get_dram_limit(pvt, range));
1505
1506 if (dhar_valid(pvt) &&
1507 dhar_base(pvt) <= sys_addr &&
1508 sys_addr < BIT_64(32)) {
1509 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1510 sys_addr);
1511 return -EINVAL;
1512 }
1513
1514 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1515 return -EINVAL;
1516
1517 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1518
1519 dct_sel_base = dct_sel_baseaddr(pvt);
1520
1521 /*
1522 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1523 * select between DCT0 and DCT1.
1524 */
1525 if (dct_high_range_enabled(pvt) &&
1526 !dct_ganging_enabled(pvt) &&
1527 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1528 high_range = true;
1529
1530 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1531
1532 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1533 high_range, dct_sel_base);
1534
1535 /* Remove node interleaving, see F1x120 */
1536 if (intlv_en)
1537 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1538 (chan_addr & 0xfff);
1539
1540 /* remove channel interleave */
1541 if (dct_interleave_enabled(pvt) &&
1542 !dct_high_range_enabled(pvt) &&
1543 !dct_ganging_enabled(pvt)) {
1544
1545 if (dct_sel_interleave_addr(pvt) != 1) {
1546 if (dct_sel_interleave_addr(pvt) == 0x3)
1547 /* hash 9 */
1548 chan_addr = ((chan_addr >> 10) << 9) |
1549 (chan_addr & 0x1ff);
1550 else
1551 /* A[6] or hash 6 */
1552 chan_addr = ((chan_addr >> 7) << 6) |
1553 (chan_addr & 0x3f);
1554 } else
1555 /* A[12] */
1556 chan_addr = ((chan_addr >> 13) << 12) |
1557 (chan_addr & 0xfff);
1558 }
1559
1560 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
1561
1562 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1563
1564 if (cs_found >= 0) {
1565 *nid = node_id;
1566 *chan_sel = channel;
1567 }
1568 return cs_found;
1569 }
1570
1571 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1572 int *node, int *chan_sel)
1573 {
1574 int cs_found = -EINVAL;
1575 unsigned range;
1576
1577 for (range = 0; range < DRAM_RANGES; range++) {
1578
1579 if (!dram_rw(pvt, range))
1580 continue;
1581
1582 if ((get_dram_base(pvt, range) <= sys_addr) &&
1583 (get_dram_limit(pvt, range) >= sys_addr)) {
1584
1585 cs_found = f1x_match_to_this_node(pvt, range,
1586 sys_addr, node,
1587 chan_sel);
1588 if (cs_found >= 0)
1589 break;
1590 }
1591 }
1592 return cs_found;
1593 }
1594
1595 /*
1596 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1597 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1598 *
1599 * The @sys_addr is usually an error address received from the hardware
1600 * (MCX_ADDR).
1601 */
1602 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1603 u16 syndrome)
1604 {
1605 struct amd64_pvt *pvt = mci->pvt_info;
1606 u32 page, offset;
1607 int nid, csrow, chan = 0;
1608
1609 error_address_to_page_and_offset(sys_addr, &page, &offset);
1610
1611 csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1612
1613 if (csrow < 0) {
1614 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1615 page, offset, syndrome,
1616 -1, -1, -1,
1617 "failed to map error addr to a csrow",
1618 "",
1619 NULL);
1620 return;
1621 }
1622
1623 /*
1624 * We need the syndromes for channel detection only when we're
1625 * ganged. Otherwise @chan should already contain the channel at
1626 * this point.
1627 */
1628 if (dct_ganging_enabled(pvt))
1629 chan = get_channel_from_ecc_syndrome(mci, syndrome);
1630
1631 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1632 page, offset, syndrome,
1633 csrow, chan, -1,
1634 "", "", NULL);
1635 }
1636
1637 /*
1638 * Debug routine to display the memory sizes of all logical DIMMs and their
1639 * CSROWs
1640 */
1641 static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
1642 {
1643 int dimm, size0, size1, factor = 0;
1644 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1645 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1646
1647 if (boot_cpu_data.x86 == 0xf) {
1648 if (pvt->dclr0 & WIDTH_128)
1649 factor = 1;
1650
1651 /* K8 families < revF not supported yet */
1652 if (pvt->ext_model < K8_REV_F)
1653 return;
1654 else
1655 WARN_ON(ctrl != 0);
1656 }
1657
1658 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
1659 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
1660 : pvt->csels[0].csbases;
1661
1662 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1663 ctrl, dbam);
1664
1665 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1666
1667 /* Dump memory sizes for DIMM and its CSROWs */
1668 for (dimm = 0; dimm < 4; dimm++) {
1669
1670 size0 = 0;
1671 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
1672 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
1673 DBAM_DIMM(dimm, dbam));
1674
1675 size1 = 0;
1676 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
1677 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
1678 DBAM_DIMM(dimm, dbam));
1679
1680 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1681 dimm * 2, size0 << factor,
1682 dimm * 2 + 1, size1 << factor);
1683 }
1684 }
1685
1686 static struct amd64_family_type amd64_family_types[] = {
1687 [K8_CPUS] = {
1688 .ctl_name = "K8",
1689 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1690 .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1691 .ops = {
1692 .early_channel_count = k8_early_channel_count,
1693 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1694 .dbam_to_cs = k8_dbam_to_chip_select,
1695 .read_dct_pci_cfg = k8_read_dct_pci_cfg,
1696 }
1697 },
1698 [F10_CPUS] = {
1699 .ctl_name = "F10h",
1700 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1701 .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1702 .ops = {
1703 .early_channel_count = f1x_early_channel_count,
1704 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1705 .dbam_to_cs = f10_dbam_to_chip_select,
1706 .read_dct_pci_cfg = f10_read_dct_pci_cfg,
1707 }
1708 },
1709 [F15_CPUS] = {
1710 .ctl_name = "F15h",
1711 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
1712 .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
1713 .ops = {
1714 .early_channel_count = f1x_early_channel_count,
1715 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1716 .dbam_to_cs = f15_dbam_to_chip_select,
1717 .read_dct_pci_cfg = f15_read_dct_pci_cfg,
1718 }
1719 },
1720 };
1721
1722 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1723 unsigned int device,
1724 struct pci_dev *related)
1725 {
1726 struct pci_dev *dev = NULL;
1727
1728 dev = pci_get_device(vendor, device, dev);
1729 while (dev) {
1730 if ((dev->bus->number == related->bus->number) &&
1731 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1732 break;
1733 dev = pci_get_device(vendor, device, dev);
1734 }
1735
1736 return dev;
1737 }
1738
1739 /*
1740 * These are tables of eigenvectors (one per line) which can be used for the
1741 * construction of the syndrome tables. The modified syndrome search algorithm
1742 * uses those to find the symbol in error and thus the DIMM.
1743 *
1744 * Algorithm courtesy of Ross LaFetra from AMD.
1745 */
1746 static u16 x4_vectors[] = {
1747 0x2f57, 0x1afe, 0x66cc, 0xdd88,
1748 0x11eb, 0x3396, 0x7f4c, 0xeac8,
1749 0x0001, 0x0002, 0x0004, 0x0008,
1750 0x1013, 0x3032, 0x4044, 0x8088,
1751 0x106b, 0x30d6, 0x70fc, 0xe0a8,
1752 0x4857, 0xc4fe, 0x13cc, 0x3288,
1753 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1754 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1755 0x15c1, 0x2a42, 0x89ac, 0x4758,
1756 0x2b03, 0x1602, 0x4f0c, 0xca08,
1757 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1758 0x8ba7, 0x465e, 0x244c, 0x1cc8,
1759 0x2b87, 0x164e, 0x642c, 0xdc18,
1760 0x40b9, 0x80de, 0x1094, 0x20e8,
1761 0x27db, 0x1eb6, 0x9dac, 0x7b58,
1762 0x11c1, 0x2242, 0x84ac, 0x4c58,
1763 0x1be5, 0x2d7a, 0x5e34, 0xa718,
1764 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1765 0x4c97, 0xc87e, 0x11fc, 0x33a8,
1766 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1767 0x16b3, 0x3d62, 0x4f34, 0x8518,
1768 0x1e2f, 0x391a, 0x5cac, 0xf858,
1769 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1770 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1771 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1772 0x4397, 0xc27e, 0x17fc, 0x3ea8,
1773 0x1617, 0x3d3e, 0x6464, 0xb8b8,
1774 0x23ff, 0x12aa, 0xab6c, 0x56d8,
1775 0x2dfb, 0x1ba6, 0x913c, 0x7328,
1776 0x185d, 0x2ca6, 0x7914, 0x9e28,
1777 0x171b, 0x3e36, 0x7d7c, 0xebe8,
1778 0x4199, 0x82ee, 0x19f4, 0x2e58,
1779 0x4807, 0xc40e, 0x130c, 0x3208,
1780 0x1905, 0x2e0a, 0x5804, 0xac08,
1781 0x213f, 0x132a, 0xadfc, 0x5ba8,
1782 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1783 };
1784
1785 static u16 x8_vectors[] = {
1786 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1787 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1788 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1789 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1790 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1791 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1792 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1793 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1794 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1795 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1796 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1797 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1798 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1799 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1800 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1801 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1802 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1803 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1804 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
1805 };
1806
1807 static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
1808 unsigned v_dim)
1809 {
1810 unsigned int i, err_sym;
1811
1812 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1813 u16 s = syndrome;
1814 unsigned v_idx = err_sym * v_dim;
1815 unsigned v_end = (err_sym + 1) * v_dim;
1816
1817 /* walk over all 16 bits of the syndrome */
1818 for (i = 1; i < (1U << 16); i <<= 1) {
1819
1820 /* if bit is set in that eigenvector... */
1821 if (v_idx < v_end && vectors[v_idx] & i) {
1822 u16 ev_comp = vectors[v_idx++];
1823
1824 /* ... and bit set in the modified syndrome, */
1825 if (s & i) {
1826 /* remove it. */
1827 s ^= ev_comp;
1828
1829 if (!s)
1830 return err_sym;
1831 }
1832
1833 } else if (s & i)
1834 /* can't get to zero, move to next symbol */
1835 break;
1836 }
1837 }
1838
1839 edac_dbg(0, "syndrome(%x) not found\n", syndrome);
1840 return -1;
1841 }
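/*
 * Worked example (illustrative only): any XOR combination of one symbol's
 * eigenvectors decodes back to that symbol.  For instance
 * x4_vectors[0] ^ x4_vectors[1] = 0x2f57 ^ 0x1afe = 0x35a9, and feeding
 * that syndrome to the decoder returns err_sym 0, which
 * map_err_sym_to_channel() below folds to channel 0 for x4 symbols.
 * The helper name is hypothetical and the call is not made by the driver.
 */
static inline int example_decode_symbol0(void)
{
	u16 syndrome = 0x2f57 ^ 0x1afe;		/* 0x35a9 */

	return decode_syndrome(syndrome, x4_vectors,
			       ARRAY_SIZE(x4_vectors), 4);
}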
1842
1843 static int map_err_sym_to_channel(int err_sym, int sym_size)
1844 {
1845 if (sym_size == 4)
1846 switch (err_sym) {
1847 case 0x20:
1848 case 0x21:
1849 return 0;
1850 break;
1851 case 0x22:
1852 case 0x23:
1853 return 1;
1854 break;
1855 default:
1856 return err_sym >> 4;
1857 break;
1858 }
1859 /* x8 symbols */
1860 else
1861 switch (err_sym) {
1862 /* imaginary bits not in a DIMM */
1863 case 0x10:
1864 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
1865 err_sym);
1866 return -1;
1867 break;
1868
1869 case 0x11:
1870 return 0;
1871 break;
1872 case 0x12:
1873 return 1;
1874 break;
1875 default:
1876 return err_sym >> 3;
1877 break;
1878 }
1879 return -1;
1880 }
1881
1882 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1883 {
1884 struct amd64_pvt *pvt = mci->pvt_info;
1885 int err_sym = -1;
1886
1887 if (pvt->ecc_sym_sz == 8)
1888 err_sym = decode_syndrome(syndrome, x8_vectors,
1889 ARRAY_SIZE(x8_vectors),
1890 pvt->ecc_sym_sz);
1891 else if (pvt->ecc_sym_sz == 4)
1892 err_sym = decode_syndrome(syndrome, x4_vectors,
1893 ARRAY_SIZE(x4_vectors),
1894 pvt->ecc_sym_sz);
1895 else {
1896 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
1897 return err_sym;
1898 }
1899
1900 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
1901 }
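/*
 * Continuing the example above (still purely illustrative): an x8 err_sym of
 * 0 takes the default branch of map_err_sym_to_channel(), so the reported
 * channel is 0 >> 3 == 0. With x4 symbols the divisor is err_sym >> 4 and the
 * explicitly handled symbols are 0x20-0x23 instead of 0x10-0x12.
 */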
1902
1903 /*
1904 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
1905 * ADDRESS and process.
1906 */
1907 static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
1908 {
1909 struct amd64_pvt *pvt = mci->pvt_info;
1910 u64 sys_addr;
1911 u16 syndrome;
1912
1913 /* Ensure that the Error Address is VALID */
1914 if (!(m->status & MCI_STATUS_ADDRV)) {
1915 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1916 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1917 0, 0, 0,
1918 -1, -1, -1,
1919 "HW has no ERROR_ADDRESS available",
1920 "",
1921 NULL);
1922 return;
1923 }
1924
1925 sys_addr = get_error_address(m);
1926 syndrome = extract_syndrome(m->status);
1927
1928 amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
1929
1930 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);
1931 }
1932
1933 /* Handle any Un-correctable Errors (UEs) */
1934 static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
1935 {
1936 struct mem_ctl_info *log_mci, *src_mci = NULL;
1937 int csrow;
1938 u64 sys_addr;
1939 u32 page, offset;
1940
1941 log_mci = mci;
1942
1943 if (!(m->status & MCI_STATUS_ADDRV)) {
1944 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1945 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1946 0, 0, 0,
1947 -1, -1, -1,
1948 "HW has no ERROR_ADDRESS available",
1949 "",
1950 NULL);
1951 return;
1952 }
1953
1954 sys_addr = get_error_address(m);
1955 error_address_to_page_and_offset(sys_addr, &page, &offset);
1956
1957 /*
1958 * Find out which node the error address belongs to. This may be
1959 * different from the node that detected the error.
1960 */
1961 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1962 if (!src_mci) {
1963 amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
1964 (unsigned long)sys_addr);
1965 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1966 page, offset, 0,
1967 -1, -1, -1,
1968 "ERROR ADDRESS NOT mapped to a MC",
1969 "",
1970 NULL);
1971 return;
1972 }
1973
1974 log_mci = src_mci;
1975
1976 csrow = sys_addr_to_csrow(log_mci, sys_addr);
1977 if (csrow < 0) {
1978 amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
1979 (unsigned long)sys_addr);
1980 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1981 page, offset, 0,
1982 -1, -1, -1,
1983 "ERROR ADDRESS NOT mapped to CS",
1984 "",
1985 NULL);
1986 } else {
1987 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1988 page, offset, 0,
1989 csrow, -1, -1,
1990 "", "", NULL);
1991 }
1992 }
1993
1994 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
1995 struct mce *m)
1996 {
1997 u16 ec = EC(m->status);
1998 u8 xec = XEC(m->status, 0x1f);
1999 u8 ecc_type = (m->status >> 45) & 0x3;
2000
2001 /* Bail out early if this was an 'observed' error */
2002 if (PP(ec) == NBSL_PP_OBS)
2003 return;
2004
2005 /* Do only ECC errors */
2006 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2007 return;
2008
2009 if (ecc_type == 2)
2010 amd64_handle_ce(mci, m);
2011 else if (ecc_type == 1)
2012 amd64_handle_ue(mci, m);
2013 }
2014
2015 void amd64_decode_bus_error(int node_id, struct mce *m)
2016 {
2017 __amd64_decode_bus_error(mcis[node_id], m);
2018 }
2019
2020 /*
2021 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
2022 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
2023 */
2024 static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
2025 {
2026 /* Reserve the ADDRESS MAP Device */
2027 pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
2028 if (!pvt->F1) {
2029 amd64_err("F1 (address map) device not found: "
2030 "vendor %x device 0x%x (broken BIOS?)\n",
2031 PCI_VENDOR_ID_AMD, f1_id);
2032 return -ENODEV;
2033 }
2034
2035 /* Reserve the MISC Device */
2036 pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
2037 if (!pvt->F3) {
2038 pci_dev_put(pvt->F1);
2039 pvt->F1 = NULL;
2040
2041 amd64_err("F3 (misc) device not found: "
2042 "vendor %x device 0x%x (broken BIOS?)\n",
2043 PCI_VENDOR_ID_AMD, f3_id);
2044
2045 return -ENODEV;
2046 }
2047 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2048 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2049 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2050
2051 return 0;
2052 }
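/*
 * For orientation (typical layout assumed here, not something this code
 * enforces): the northbridge of node N appears as PCI device 0x18 + N on
 * bus 0, with function 1 = address map, function 2 = DRAM controller and
 * function 3 = miscellaneous control. On node 0 the edac_dbg() lines above
 * would then typically print something like
 *
 *	F1: 0000:00:18.1  F2: 0000:00:18.2  F3: 0000:00:18.3
 */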
2053
2054 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2055 {
2056 pci_dev_put(pvt->F1);
2057 pci_dev_put(pvt->F3);
2058 }
2059
2060 /*
2061 * Retrieve the hardware registers of the memory controller (this includes the
2062 * 'Address Map' and 'Misc' device regs)
2063 */
2064 static void read_mc_regs(struct amd64_pvt *pvt)
2065 {
2066 struct cpuinfo_x86 *c = &boot_cpu_data;
2067 u64 msr_val;
2068 u32 tmp;
2069 unsigned range;
2070
2071 /*
2072 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2073 * those are Read-As-Zero
2074 */
2075 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2076 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
2077
2078 /* check first whether TOP_MEM2 is enabled */
2079 rdmsrl(MSR_K8_SYSCFG, msr_val);
2080 if (msr_val & (1U << 21)) {
2081 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2082 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2083 } else
2084 edac_dbg(0, " TOP_MEM2 disabled\n");
2085
2086 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2087
2088 read_dram_ctl_register(pvt);
2089
2090 for (range = 0; range < DRAM_RANGES; range++) {
2091 u8 rw;
2092
2093 /* read settings for this DRAM range */
2094 read_dram_base_limit_regs(pvt, range);
2095
2096 rw = dram_rw(pvt, range);
2097 if (!rw)
2098 continue;
2099
2100 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2101 range,
2102 get_dram_base(pvt, range),
2103 get_dram_limit(pvt, range));
2104
2105 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2106 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2107 (rw & 0x1) ? "R" : "-",
2108 (rw & 0x2) ? "W" : "-",
2109 dram_intlv_sel(pvt, range),
2110 dram_dst_node(pvt, range));
2111 }
2112
2113 read_dct_base_mask(pvt);
2114
2115 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2116 amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
2117
2118 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2119
2120 amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
2121 amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
2122
2123 if (!dct_ganging_enabled(pvt)) {
2124 amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
2125 amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
2126 }
2127
2128 pvt->ecc_sym_sz = 4;
2129
2130 if (c->x86 >= 0x10) {
2131 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2132 amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
2133
2134 /* F10h, revD and later can do x8 ECC too */
2135 if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
2136 pvt->ecc_sym_sz = 8;
2137 }
2138 dump_misc_regs(pvt);
2139 }
2140
2141 /*
2142 * NOTE: CPU Revision Dependent code
2143 *
2144 * Input:
2145 *	@pvt	driver private data; provides access to the DRAM Bank
2146 *		Address Mapping (DBAM) register, the node id and the
2147 *		DCLR register (which reflects dual-channel state)
2148 *	@dct	DCT (0 or 1) whose DBAM register is used
2149 *	@csrow_nr	ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2150 *
2151 * The DBAM register consists of four 4-bit fields:
2152 *
2153 * Bits: CSROWs
2154 * 0-3 CSROWs 0 and 1
2155 * 4-7 CSROWs 2 and 3
2156 * 8-11 CSROWs 4 and 5
2157 * 12-15 CSROWs 6 and 7
2158 *
2159 * Each field's value ranges from 0 to 15.
2160 * The meaning of the values depends on CPU revision and dual-channel state;
2161 * see the relevant BKDG for more info.
2162 *
2163 * The memory controller provides for a total of only 8 CSROWs in its current
2164 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2165 * single-channel mode or two DIMMs in dual-channel mode.
2166 *
2167 * The following code logic collapses the various tables for CSROW based on CPU
2168 * revision.
2169 *
2170 * Returns:
2171 *	The number of PAGE_SIZE pages that the specified CSROW
2172 *	encompasses.
2173 *
2174 */
2175 static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2176 {
2177 u32 cs_mode, nr_pages;
2178 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2179
2180 /*
2181 * The expression below looks odd because (x / 2) * 4 could seemingly be
2182 * simplified to x * 2. The integer division is intentional: 1/2 == 0, so
2183 * each pair of csrows shares one DBAM field. The result is the number of
2184 * bits to shift the DBAM register right to extract the 4-bit cs_mode
2185 * field for this csrow.
2186 */
2187 cs_mode = (dbam >> ((csrow_nr / 2) * 4)) & 0xF;
2188
2189 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
2190
2191 edac_dbg(0, " (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
2192 edac_dbg(0, " nr_pages/channel= %u channel-count = %d\n",
2193 nr_pages, pvt->channel_count);
2194
2195 return nr_pages;
2196 }
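/*
 * Example with illustrative numbers, assuming a 4 KiB PAGE_SIZE and that the
 * family's dbam_to_cs() op returns the chip select size in MiB (which is what
 * the 20 - PAGE_SHIFT conversion above implies): for csrow 5 the shift is
 * (5 / 2) * 4 == 8, so cs_mode comes from DBAM bits 11:8. If that mode
 * translates to, say, 1024 MiB, the csrow covers 1024 << (20 - 12) == 262144
 * pages per channel.
 */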
2197
2198 /*
2199 * Initialize the array of csrow attribute instances, based on the values
2200 * from pci config hardware registers.
2201 */
2202 static int init_csrows(struct mem_ctl_info *mci)
2203 {
2204 struct csrow_info *csrow;
2205 struct dimm_info *dimm;
2206 struct amd64_pvt *pvt = mci->pvt_info;
2207 u64 base, mask;
2208 u32 val;
2209 int i, j, empty = 1;
2210 enum mem_type mtype;
2211 enum edac_type edac_mode;
2212 int nr_pages = 0;
2213
2214 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2215
2216 pvt->nbcfg = val;
2217
2218 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2219 pvt->mc_node_id, val,
2220 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2221
2222 for_each_chip_select(i, 0, pvt) {
2223 csrow = mci->csrows[i];
2224
2225 if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) {
2226 edac_dbg(1, "----CSROW %d EMPTY for MC node %d\n",
2227 i, pvt->mc_node_id);
2228 continue;
2229 }
2230
2231 empty = 0;
2232 if (csrow_enabled(i, 0, pvt))
2233 nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
2234 if (csrow_enabled(i, 1, pvt))
2235 nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
2236
2237 get_cs_base_and_mask(pvt, i, 0, &base, &mask);
2238 /* 8 bytes of resolution */
2239
2240 mtype = amd64_determine_memory_type(pvt, i);
2241
2242 edac_dbg(1, " for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2243 edac_dbg(1, " nr_pages: %u\n",
2244 nr_pages * pvt->channel_count);
2245
2246 /*
2247 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2248 */
2249 if (pvt->nbcfg & NBCFG_ECC_ENABLE)
2250 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
2251 EDAC_S4ECD4ED : EDAC_SECDED;
2252 else
2253 edac_mode = EDAC_NONE;
2254
2255 for (j = 0; j < pvt->channel_count; j++) {
2256 dimm = csrow->channels[j]->dimm;
2257 dimm->mtype = mtype;
2258 dimm->edac_mode = edac_mode;
2259 dimm->nr_pages = nr_pages;
2260 }
2261 }
2262
2263 return empty;
2264 }
2265
2266 /* get all cores on this node */
2267 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
2268 {
2269 int cpu;
2270
2271 for_each_online_cpu(cpu)
2272 if (amd_get_nb_id(cpu) == nid)
2273 cpumask_set_cpu(cpu, mask);
2274 }
2275
2276 /* check MCG_CTL on all the cpus on this node */
2277 static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
2278 {
2279 cpumask_var_t mask;
2280 int cpu, nbe;
2281 bool ret = false;
2282
2283 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2284 amd64_warn("%s: Error allocating mask\n", __func__);
2285 return false;
2286 }
2287
2288 get_cpus_on_this_dct_cpumask(mask, nid);
2289
2290 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2291
2292 for_each_cpu(cpu, mask) {
2293 struct msr *reg = per_cpu_ptr(msrs, cpu);
2294 nbe = reg->l & MSR_MCGCTL_NBE;
2295
2296 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2297 cpu, reg->q,
2298 (nbe ? "enabled" : "disabled"));
2299
2300 if (!nbe)
2301 goto out;
2302 }
2303 ret = true;
2304
2305 out:
2306 free_cpumask_var(mask);
2307 return ret;
2308 }
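/*
 * In other words (illustration): NB MCE reporting counts as enabled only if
 * every core on the node has MSR_MCGCTL_NBE (bit 4 of MSR_IA32_MCG_CTL,
 * i.e. MSR 0x17b) set. A single core with the bit clear makes this return
 * false, which is what the "set MSR 0x...[4]" hint printed by ecc_enabled()
 * below refers to.
 */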
2309
2310 static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
2311 {
2312 cpumask_var_t cmask;
2313 int cpu;
2314
2315 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2316 amd64_warn("%s: error allocating mask\n", __func__);
2317 return false;
2318 }
2319
2320 get_cpus_on_this_dct_cpumask(cmask, nid);
2321
2322 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2323
2324 for_each_cpu(cpu, cmask) {
2325
2326 struct msr *reg = per_cpu_ptr(msrs, cpu);
2327
2328 if (on) {
2329 if (reg->l & MSR_MCGCTL_NBE)
2330 s->flags.nb_mce_enable = 1;
2331
2332 reg->l |= MSR_MCGCTL_NBE;
2333 } else {
2334 /*
2335 * Turn off NB MCE reporting only when it was off before
2336 */
2337 if (!s->flags.nb_mce_enable)
2338 reg->l &= ~MSR_MCGCTL_NBE;
2339 }
2340 }
2341 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2342
2343 free_cpumask_var(cmask);
2344
2345 return 0;
2346 }
2347
2348 static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2349 struct pci_dev *F3)
2350 {
2351 bool ret = true;
2352 u32 value, mask = 0x3; /* UECC/CECC enable */
2353
2354 if (toggle_ecc_err_reporting(s, nid, ON)) {
2355 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2356 return false;
2357 }
2358
2359 amd64_read_pci_cfg(F3, NBCTL, &value);
2360
2361 s->old_nbctl = value & mask;
2362 s->nbctl_valid = true;
2363
2364 value |= mask;
2365 amd64_write_pci_cfg(F3, NBCTL, value);
2366
2367 amd64_read_pci_cfg(F3, NBCFG, &value);
2368
2369 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2370 nid, value, !!(value & NBCFG_ECC_ENABLE));
2371
2372 if (!(value & NBCFG_ECC_ENABLE)) {
2373 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2374
2375 s->flags.nb_ecc_prev = 0;
2376
2377 /* Attempt to turn on DRAM ECC Enable */
2378 value |= NBCFG_ECC_ENABLE;
2379 amd64_write_pci_cfg(F3, NBCFG, value);
2380
2381 amd64_read_pci_cfg(F3, NBCFG, &value);
2382
2383 if (!(value & NBCFG_ECC_ENABLE)) {
2384 amd64_warn("Hardware rejected DRAM ECC enable, "
2385 "check memory DIMM configuration.\n");
2386 ret = false;
2387 } else {
2388 amd64_info("Hardware accepted DRAM ECC Enable\n");
2389 }
2390 } else {
2391 s->flags.nb_ecc_prev = 1;
2392 }
2393
2394 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2395 nid, value, !!(value & NBCFG_ECC_ENABLE));
2396
2397 return ret;
2398 }
2399
2400 static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2401 struct pci_dev *F3)
2402 {
2403 u32 value, mask = 0x3; /* UECC/CECC enable */
2404
2405
2406 if (!s->nbctl_valid)
2407 return;
2408
2409 amd64_read_pci_cfg(F3, NBCTL, &value);
2410 value &= ~mask;
2411 value |= s->old_nbctl;
2412
2413 amd64_write_pci_cfg(F3, NBCTL, value);
2414
2415 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
2416 if (!s->flags.nb_ecc_prev) {
2417 amd64_read_pci_cfg(F3, NBCFG, &value);
2418 value &= ~NBCFG_ECC_ENABLE;
2419 amd64_write_pci_cfg(F3, NBCFG, value);
2420 }
2421
2422 /* restore the NB Enable MCGCTL bit */
2423 if (toggle_ecc_err_reporting(s, nid, OFF))
2424 amd64_warn("Error restoring NB MCGCTL settings!\n");
2425 }
2426
2427 /*
2428 * EDAC requires that the BIOS have ECC enabled before
2429 * taking over the processing of ECC errors. A command line
2430 * option can be used to force-enable hardware ECC later in
2431 * enable_ecc_error_reporting().
2432 */
2433 static const char *ecc_msg =
2434 "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
2435 " Either enable ECC checking or force module loading by setting "
2436 "'ecc_enable_override'.\n"
2437 " (Note that use of the override may cause unknown side effects.)\n";
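/*
 * For example (hypothetical invocation; the module name may differ depending
 * on how the driver was built):
 *
 *	modprobe amd64_edac_mod ecc_enable_override=1
 *
 * lets the driver attempt to switch DRAM ECC on itself in
 * enable_ecc_error_reporting() instead of bailing out with the message above.
 */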
2438
2439 static bool ecc_enabled(struct pci_dev *F3, u8 nid)
2440 {
2441 u32 value;
2442 u8 ecc_en = 0;
2443 bool nb_mce_en = false;
2444
2445 amd64_read_pci_cfg(F3, NBCFG, &value);
2446
2447 ecc_en = !!(value & NBCFG_ECC_ENABLE);
2448 amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
2449
2450 nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
2451 if (!nb_mce_en)
2452 amd64_notice("NB MCE bank disabled, set MSR "
2453 "0x%08x[4] on node %d to enable.\n",
2454 MSR_IA32_MCG_CTL, nid);
2455
2456 if (!ecc_en || !nb_mce_en) {
2457 amd64_notice("%s", ecc_msg);
2458 return false;
2459 }
2460 return true;
2461 }
2462
2463 static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
2464 {
2465 int rc;
2466
2467 rc = amd64_create_sysfs_dbg_files(mci);
2468 if (rc < 0)
2469 return rc;
2470
2471 if (boot_cpu_data.x86 >= 0x10) {
2472 rc = amd64_create_sysfs_inject_files(mci);
2473 if (rc < 0)
2474 return rc;
2475 }
2476
2477 return 0;
2478 }
2479
2480 static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
2481 {
2482 amd64_remove_sysfs_dbg_files(mci);
2483
2484 if (boot_cpu_data.x86 >= 0x10)
2485 amd64_remove_sysfs_inject_files(mci);
2486 }
2487
2488 static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
2489 struct amd64_family_type *fam)
2490 {
2491 struct amd64_pvt *pvt = mci->pvt_info;
2492
2493 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2494 mci->edac_ctl_cap = EDAC_FLAG_NONE;
2495
2496 if (pvt->nbcap & NBCAP_SECDED)
2497 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2498
2499 if (pvt->nbcap & NBCAP_CHIPKILL)
2500 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2501
2502 mci->edac_cap = amd64_determine_edac_cap(pvt);
2503 mci->mod_name = EDAC_MOD_STR;
2504 mci->mod_ver = EDAC_AMD64_VERSION;
2505 mci->ctl_name = fam->ctl_name;
2506 mci->dev_name = pci_name(pvt->F2);
2507 mci->ctl_page_to_phys = NULL;
2508
2509 /* memory scrubber interface */
2510 mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
2511 mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
2512 }
2513
2514 /*
2515 * returns a pointer to the family descriptor on success, NULL otherwise.
2516 */
2517 static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
2518 {
2519 u8 fam = boot_cpu_data.x86;
2520 struct amd64_family_type *fam_type = NULL;
2521
2522 switch (fam) {
2523 case 0xf:
2524 fam_type = &amd64_family_types[K8_CPUS];
2525 pvt->ops = &amd64_family_types[K8_CPUS].ops;
2526 break;
2527
2528 case 0x10:
2529 fam_type = &amd64_family_types[F10_CPUS];
2530 pvt->ops = &amd64_family_types[F10_CPUS].ops;
2531 break;
2532
2533 case 0x15:
2534 fam_type = &amd64_family_types[F15_CPUS];
2535 pvt->ops = &amd64_family_types[F15_CPUS].ops;
2536 break;
2537
2538 default:
2539 amd64_err("Unsupported family!\n");
2540 return NULL;
2541 }
2542
2543 pvt->ext_model = boot_cpu_data.x86_model >> 4;
2544
2545 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
2546 (fam == 0xf ?
2547 (pvt->ext_model >= K8_REV_F ? "revF or later "
2548 : "revE or earlier ")
2549 : ""), pvt->mc_node_id);
2550 return fam_type;
2551 }
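/*
 * Example (illustrative): on a family 0xFh part with x86_model 0x43, the
 * extended model computed above is 0x43 >> 4 == 4; if that is >= K8_REV_F the
 * banner reads "... revF or later detected", otherwise "revE or earlier".
 * Family 0x10h and 0x15h parts get no revision qualifier at all.
 */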
2552
2553 static int amd64_init_one_instance(struct pci_dev *F2)
2554 {
2555 struct amd64_pvt *pvt = NULL;
2556 struct amd64_family_type *fam_type = NULL;
2557 struct mem_ctl_info *mci = NULL;
2558 struct edac_mc_layer layers[2];
2559 int err = 0, ret;
2560 u8 nid = get_node_id(F2);
2561
2562 ret = -ENOMEM;
2563 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
2564 if (!pvt)
2565 goto err_ret;
2566
2567 pvt->mc_node_id = nid;
2568 pvt->F2 = F2;
2569
2570 ret = -EINVAL;
2571 fam_type = amd64_per_family_init(pvt);
2572 if (!fam_type)
2573 goto err_free;
2574
2575 ret = -ENODEV;
2576 err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
2577 if (err)
2578 goto err_free;
2579
2580 read_mc_regs(pvt);
2581
2582 /*
2583 * We need to determine how many memory channels there are. Then use
2584 * that information for calculating the size of the dynamic instance
2585 * tables in the 'mci' structure.
2586 */
2587 ret = -EINVAL;
2588 pvt->channel_count = pvt->ops->early_channel_count(pvt);
2589 if (pvt->channel_count < 0)
2590 goto err_siblings;
2591
2592 ret = -ENOMEM;
2593 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
2594 layers[0].size = pvt->csels[0].b_cnt;
2595 layers[0].is_virt_csrow = true;
2596 layers[1].type = EDAC_MC_LAYER_CHANNEL;
2597 layers[1].size = pvt->channel_count;
2598 layers[1].is_virt_csrow = false;
2599 mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
2600 if (!mci)
2601 goto err_siblings;
2602
2603 mci->pvt_info = pvt;
2604 mci->pdev = &pvt->F2->dev;
2605
2606 setup_mci_misc_attrs(mci, fam_type);
2607
2608 if (init_csrows(mci))
2609 mci->edac_cap = EDAC_FLAG_NONE;
2610
2611 ret = -ENODEV;
2612 if (edac_mc_add_mc(mci)) {
2613 edac_dbg(1, "failed edac_mc_add_mc()\n");
2614 goto err_add_mc;
2615 }
2616 if (set_mc_sysfs_attrs(mci)) {
2617 edac_dbg(1, "failed set_mc_sysfs_attrs()\n");
2618 goto err_add_sysfs;
2619 }
2620
2621 /* register stuff with EDAC MCE */
2622 if (report_gart_errors)
2623 amd_report_gart_errors(true);
2624
2625 amd_register_ecc_decoder(amd64_decode_bus_error);
2626
2627 mcis[nid] = mci;
2628
2629 atomic_inc(&drv_instances);
2630
2631 return 0;
2632
2633 err_add_sysfs:
2634 edac_mc_del_mc(mci->pdev);
2635 err_add_mc:
2636 edac_mc_free(mci);
2637
2638 err_siblings:
2639 free_mc_sibling_devs(pvt);
2640
2641 err_free:
2642 kfree(pvt);
2643
2644 err_ret:
2645 return ret;
2646 }
2647
2648 static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
2649 const struct pci_device_id *mc_type)
2650 {
2651 u8 nid = get_node_id(pdev);
2652 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2653 struct ecc_settings *s;
2654 int ret = 0;
2655
2656 ret = pci_enable_device(pdev);
2657 if (ret < 0) {
2658 edac_dbg(0, "ret=%d\n", ret);
2659 return -EIO;
2660 }
2661
2662 ret = -ENOMEM;
2663 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
2664 if (!s)
2665 goto err_out;
2666
2667 ecc_stngs[nid] = s;
2668
2669 if (!ecc_enabled(F3, nid)) {
2670 ret = -ENODEV;
2671
2672 if (!ecc_enable_override)
2673 goto err_enable;
2674
2675 amd64_warn("Forcing ECC on!\n");
2676
2677 if (!enable_ecc_error_reporting(s, nid, F3))
2678 goto err_enable;
2679 }
2680
2681 ret = amd64_init_one_instance(pdev);
2682 if (ret < 0) {
2683 amd64_err("Error probing instance: %d\n", nid);
2684 restore_ecc_error_reporting(s, nid, F3);
2685 }
2686
2687 return ret;
2688
2689 err_enable:
2690 kfree(s);
2691 ecc_stngs[nid] = NULL;
2692
2693 err_out:
2694 return ret;
2695 }
2696
2697 static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
2698 {
2699 struct mem_ctl_info *mci;
2700 struct amd64_pvt *pvt;
2701 u8 nid = get_node_id(pdev);
2702 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2703 struct ecc_settings *s = ecc_stngs[nid];
2704
2705 mci = find_mci_by_dev(&pdev->dev);
2706 del_mc_sysfs_attrs(mci);
2707 /* Remove from EDAC CORE tracking list */
2708 mci = edac_mc_del_mc(&pdev->dev);
2709 if (!mci)
2710 return;
2711
2712 pvt = mci->pvt_info;
2713
2714 restore_ecc_error_reporting(s, nid, F3);
2715
2716 free_mc_sibling_devs(pvt);
2717
2718 /* unregister from EDAC MCE */
2719 amd_report_gart_errors(false);
2720 amd_unregister_ecc_decoder(amd64_decode_bus_error);
2721
2722 kfree(ecc_stngs[nid]);
2723 ecc_stngs[nid] = NULL;
2724
2725 /* Free the EDAC CORE resources */
2726 mci->pvt_info = NULL;
2727 mcis[nid] = NULL;
2728
2729 kfree(pvt);
2730 edac_mc_free(mci);
2731 }
2732
2733 /*
2734 * This table is part of the interface for loading drivers for PCI devices. The
2735 * PCI core identifies what devices are on a system during boot, and then
2736 * queries this table to see whether this driver handles a given device.
2737 */
2738 static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
2739 {
2740 .vendor = PCI_VENDOR_ID_AMD,
2741 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2742 .subvendor = PCI_ANY_ID,
2743 .subdevice = PCI_ANY_ID,
2744 .class = 0,
2745 .class_mask = 0,
2746 },
2747 {
2748 .vendor = PCI_VENDOR_ID_AMD,
2749 .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2750 .subvendor = PCI_ANY_ID,
2751 .subdevice = PCI_ANY_ID,
2752 .class = 0,
2753 .class_mask = 0,
2754 },
2755 {
2756 .vendor = PCI_VENDOR_ID_AMD,
2757 .device = PCI_DEVICE_ID_AMD_15H_NB_F2,
2758 .subvendor = PCI_ANY_ID,
2759 .subdevice = PCI_ANY_ID,
2760 .class = 0,
2761 .class_mask = 0,
2762 },
2763
2764 {0, }
2765 };
2766 MODULE_DEVICE_TABLE(pci, amd64_pci_table);
2767
2768 static struct pci_driver amd64_pci_driver = {
2769 .name = EDAC_MOD_STR,
2770 .probe = amd64_probe_one_instance,
2771 .remove = __devexit_p(amd64_remove_one_instance),
2772 .id_table = amd64_pci_table,
2773 };
2774
2775 static void setup_pci_device(void)
2776 {
2777 struct mem_ctl_info *mci;
2778 struct amd64_pvt *pvt;
2779
2780 if (amd64_ctl_pci)
2781 return;
2782
2783 mci = mcis[0];
2784 if (mci) {
2785
2786 pvt = mci->pvt_info;
2787 amd64_ctl_pci =
2788 edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
2789
2790 if (!amd64_ctl_pci) {
2791 pr_warning("%s(): Unable to create PCI control\n",
2792 __func__);
2793
2794 pr_warning("%s(): PCI error report via EDAC not set\n",
2795 __func__);
2796 }
2797 }
2798 }
2799
2800 static int __init amd64_edac_init(void)
2801 {
2802 int err = -ENODEV;
2803
2804 printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
2805
2806 opstate_init();
2807
2808 if (amd_cache_northbridges() < 0)
2809 goto err_ret;
2810
2811 err = -ENOMEM;
2812 mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
2813 ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
2814 if (!(mcis && ecc_stngs))
2815 goto err_free;
2816
2817 msrs = msrs_alloc();
2818 if (!msrs)
2819 goto err_free;
2820
2821 err = pci_register_driver(&amd64_pci_driver);
2822 if (err)
2823 goto err_pci;
2824
2825 err = -ENODEV;
2826 if (!atomic_read(&drv_instances))
2827 goto err_no_instances;
2828
2829 setup_pci_device();
2830 return 0;
2831
2832 err_no_instances:
2833 pci_unregister_driver(&amd64_pci_driver);
2834
2835 err_pci:
2836 msrs_free(msrs);
2837 msrs = NULL;
2838
2839 err_free:
2840 kfree(mcis);
2841 mcis = NULL;
2842
2843 kfree(ecc_stngs);
2844 ecc_stngs = NULL;
2845
2846 err_ret:
2847 return err;
2848 }
2849
2850 static void __exit amd64_edac_exit(void)
2851 {
2852 if (amd64_ctl_pci)
2853 edac_pci_release_generic_ctl(amd64_ctl_pci);
2854
2855 pci_unregister_driver(&amd64_pci_driver);
2856
2857 kfree(ecc_stngs);
2858 ecc_stngs = NULL;
2859
2860 kfree(mcis);
2861 mcis = NULL;
2862
2863 msrs_free(msrs);
2864 msrs = NULL;
2865 }
2866
2867 module_init(amd64_edac_init);
2868 module_exit(amd64_edac_exit);
2869
2870 MODULE_LICENSE("GPL");
2871 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
2872 "Dave Peterson, Thayne Harbaugh");
2873 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
2874 EDAC_AMD64_VERSION);
2875
2876 module_param(edac_op_state, int, 0444);
2877 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");