1 #include "amd64_edac.h"
2 #include <asm/amd_nb.h>
4 static struct edac_pci_ctl_info
*amd64_ctl_pci
;
6 static int report_gart_errors
;
7 module_param(report_gart_errors
, int, 0644);
10 * Set by command line parameter. If BIOS has enabled the ECC, this override is
11 * cleared to prevent re-enabling the hardware by this driver.
13 static int ecc_enable_override
;
14 module_param(ecc_enable_override
, int, 0644);
16 static struct msr __percpu
*msrs
;
19 * count successfully initialized driver instances for setup_pci_device()
21 static atomic_t drv_instances
= ATOMIC_INIT(0);
23 /* Per-node driver instances */
24 static struct mem_ctl_info
**mcis
;
25 static struct ecc_settings
**ecc_stngs
;
28 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
29 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
32 *FIXME: Produce a better mapping/linearisation.
35 u32 scrubval
; /* bit pattern for scrub rate */
36 u32 bandwidth
; /* bandwidth consumed (bytes/sec) */
38 { 0x01, 1600000000UL},
60 { 0x00, 0UL}, /* scrubbing off */
63 static int __amd64_read_pci_cfg_dword(struct pci_dev
*pdev
, int offset
,
64 u32
*val
, const char *func
)
68 err
= pci_read_config_dword(pdev
, offset
, val
);
70 amd64_warn("%s: error reading F%dx%03x.\n",
71 func
, PCI_FUNC(pdev
->devfn
), offset
);
76 int __amd64_write_pci_cfg_dword(struct pci_dev
*pdev
, int offset
,
77 u32 val
, const char *func
)
81 err
= pci_write_config_dword(pdev
, offset
, val
);
83 amd64_warn("%s: error writing to F%dx%03x.\n",
84 func
, PCI_FUNC(pdev
->devfn
), offset
);
91 * Depending on the family, F2 DCT reads need special handling:
93 * K8: has a single DCT only
95 * F10h: each DCT has its own set of regs
99 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
102 static int k8_read_dct_pci_cfg(struct amd64_pvt
*pvt
, int addr
, u32
*val
,
108 return __amd64_read_pci_cfg_dword(pvt
->F2
, addr
, val
, func
);
111 static int f10_read_dct_pci_cfg(struct amd64_pvt
*pvt
, int addr
, u32
*val
,
114 return __amd64_read_pci_cfg_dword(pvt
->F2
, addr
, val
, func
);
118 * Select DCT to which PCI cfg accesses are routed
120 static void f15h_select_dct(struct amd64_pvt
*pvt
, u8 dct
)
124 amd64_read_pci_cfg(pvt
->F1
, DCT_CFG_SEL
, ®
);
127 amd64_write_pci_cfg(pvt
->F1
, DCT_CFG_SEL
, reg
);
130 static int f15_read_dct_pci_cfg(struct amd64_pvt
*pvt
, int addr
, u32
*val
,
135 if (addr
>= 0x140 && addr
<= 0x1a0) {
140 f15h_select_dct(pvt
, dct
);
142 return __amd64_read_pci_cfg_dword(pvt
->F2
, addr
, val
, func
);
146 * Memory scrubber control interface. For K8, memory scrubbing is handled by
147 * hardware and can involve L2 cache, dcache as well as the main memory. With
148 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
151 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
152 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
153 * bytes/sec for the setting.
155 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
156 * other archs, we might not have access to the caches directly.
160 * scan the scrub rate mapping table for a close or matching bandwidth value to
161 * issue. If requested is too big, then use last maximum value found.
163 static int __amd64_set_scrub_rate(struct pci_dev
*ctl
, u32 new_bw
, u32 min_rate
)
169 * map the configured rate (new_bw) to a value specific to the AMD64
170 * memory controller and apply to register. Search for the first
171 * bandwidth entry that is greater or equal than the setting requested
172 * and program that. If at last entry, turn off DRAM scrubbing.
174 for (i
= 0; i
< ARRAY_SIZE(scrubrates
); i
++) {
176 * skip scrub rates which aren't recommended
177 * (see F10 BKDG, F3x58)
179 if (scrubrates
[i
].scrubval
< min_rate
)
182 if (scrubrates
[i
].bandwidth
<= new_bw
)
186 * if no suitable bandwidth found, turn off DRAM scrubbing
187 * entirely by falling back to the last element in the
192 scrubval
= scrubrates
[i
].scrubval
;
194 pci_write_bits32(ctl
, SCRCTRL
, scrubval
, 0x001F);
197 return scrubrates
[i
].bandwidth
;
202 static int amd64_set_scrub_rate(struct mem_ctl_info
*mci
, u32 bw
)
204 struct amd64_pvt
*pvt
= mci
->pvt_info
;
205 u32 min_scrubrate
= 0x5;
207 if (boot_cpu_data
.x86
== 0xf)
210 /* F15h Erratum #505 */
211 if (boot_cpu_data
.x86
== 0x15)
212 f15h_select_dct(pvt
, 0);
214 return __amd64_set_scrub_rate(pvt
->F3
, bw
, min_scrubrate
);
217 static int amd64_get_scrub_rate(struct mem_ctl_info
*mci
)
219 struct amd64_pvt
*pvt
= mci
->pvt_info
;
221 int i
, retval
= -EINVAL
;
223 /* F15h Erratum #505 */
224 if (boot_cpu_data
.x86
== 0x15)
225 f15h_select_dct(pvt
, 0);
227 amd64_read_pci_cfg(pvt
->F3
, SCRCTRL
, &scrubval
);
229 scrubval
= scrubval
& 0x001F;
231 for (i
= 0; i
< ARRAY_SIZE(scrubrates
); i
++) {
232 if (scrubrates
[i
].scrubval
== scrubval
) {
233 retval
= scrubrates
[i
].bandwidth
;
241 * returns true if the SysAddr given by sys_addr matches the
242 * DRAM base/limit associated with node_id
244 static bool amd64_base_limit_match(struct amd64_pvt
*pvt
, u64 sys_addr
,
249 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
250 * all ones if the most significant implemented address bit is 1.
251 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
252 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
253 * Application Programming.
255 addr
= sys_addr
& 0x000000ffffffffffull
;
257 return ((addr
>= get_dram_base(pvt
, nid
)) &&
258 (addr
<= get_dram_limit(pvt
, nid
)));
262 * Attempt to map a SysAddr to a node. On success, return a pointer to the
263 * mem_ctl_info structure for the node that the SysAddr maps to.
265 * On failure, return NULL.
267 static struct mem_ctl_info
*find_mc_by_sys_addr(struct mem_ctl_info
*mci
,
270 struct amd64_pvt
*pvt
;
275 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
276 * 3.4.4.2) registers to map the SysAddr to a node ID.
281 * The value of this field should be the same for all DRAM Base
282 * registers. Therefore we arbitrarily choose to read it from the
283 * register for node 0.
285 intlv_en
= dram_intlv_en(pvt
, 0);
288 for (node_id
= 0; node_id
< DRAM_RANGES
; node_id
++) {
289 if (amd64_base_limit_match(pvt
, sys_addr
, node_id
))
295 if (unlikely((intlv_en
!= 0x01) &&
296 (intlv_en
!= 0x03) &&
297 (intlv_en
!= 0x07))) {
298 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en
);
302 bits
= (((u32
) sys_addr
) >> 12) & intlv_en
;
304 for (node_id
= 0; ; ) {
305 if ((dram_intlv_sel(pvt
, node_id
) & intlv_en
) == bits
)
306 break; /* intlv_sel field matches */
308 if (++node_id
>= DRAM_RANGES
)
312 /* sanity test for sys_addr */
313 if (unlikely(!amd64_base_limit_match(pvt
, sys_addr
, node_id
))) {
314 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
315 "range for node %d with node interleaving enabled.\n",
316 __func__
, sys_addr
, node_id
);
321 return edac_mc_find((int)node_id
);
324 edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
325 (unsigned long)sys_addr
);
331 * compute the CS base address of the @csrow on the DRAM controller @dct.
332 * For details see F2x[5C:40] in the processor's BKDG
334 static void get_cs_base_and_mask(struct amd64_pvt
*pvt
, int csrow
, u8 dct
,
335 u64
*base
, u64
*mask
)
337 u64 csbase
, csmask
, base_bits
, mask_bits
;
340 if (boot_cpu_data
.x86
== 0xf && pvt
->ext_model
< K8_REV_F
) {
341 csbase
= pvt
->csels
[dct
].csbases
[csrow
];
342 csmask
= pvt
->csels
[dct
].csmasks
[csrow
];
343 base_bits
= GENMASK(21, 31) | GENMASK(9, 15);
344 mask_bits
= GENMASK(21, 29) | GENMASK(9, 15);
347 csbase
= pvt
->csels
[dct
].csbases
[csrow
];
348 csmask
= pvt
->csels
[dct
].csmasks
[csrow
>> 1];
351 if (boot_cpu_data
.x86
== 0x15)
352 base_bits
= mask_bits
= GENMASK(19,30) | GENMASK(5,13);
354 base_bits
= mask_bits
= GENMASK(19,28) | GENMASK(5,13);
357 *base
= (csbase
& base_bits
) << addr_shift
;
360 /* poke holes for the csmask */
361 *mask
&= ~(mask_bits
<< addr_shift
);
363 *mask
|= (csmask
& mask_bits
) << addr_shift
;
366 #define for_each_chip_select(i, dct, pvt) \
367 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
369 #define chip_select_base(i, dct, pvt) \
370 pvt->csels[dct].csbases[i]
372 #define for_each_chip_select_mask(i, dct, pvt) \
373 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
376 * @input_addr is an InputAddr associated with the node given by mci. Return the
377 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
379 static int input_addr_to_csrow(struct mem_ctl_info
*mci
, u64 input_addr
)
381 struct amd64_pvt
*pvt
;
387 for_each_chip_select(csrow
, 0, pvt
) {
388 if (!csrow_enabled(csrow
, 0, pvt
))
391 get_cs_base_and_mask(pvt
, csrow
, 0, &base
, &mask
);
395 if ((input_addr
& mask
) == (base
& mask
)) {
396 edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
397 (unsigned long)input_addr
, csrow
,
403 edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
404 (unsigned long)input_addr
, pvt
->mc_node_id
);
410 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
411 * for the node represented by mci. Info is passed back in *hole_base,
412 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
413 * info is invalid. Info may be invalid for either of the following reasons:
415 * - The revision of the node is not E or greater. In this case, the DRAM Hole
416 * Address Register does not exist.
418 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
419 * indicating that its contents are not valid.
421 * The values passed back in *hole_base, *hole_offset, and *hole_size are
422 * complete 32-bit values despite the fact that the bitfields in the DHAR
423 * only represent bits 31-24 of the base and offset values.
425 int amd64_get_dram_hole_info(struct mem_ctl_info
*mci
, u64
*hole_base
,
426 u64
*hole_offset
, u64
*hole_size
)
428 struct amd64_pvt
*pvt
= mci
->pvt_info
;
431 /* only revE and later have the DRAM Hole Address Register */
432 if (boot_cpu_data
.x86
== 0xf && pvt
->ext_model
< K8_REV_E
) {
433 edac_dbg(1, " revision %d for node %d does not support DHAR\n",
434 pvt
->ext_model
, pvt
->mc_node_id
);
438 /* valid for Fam10h and above */
439 if (boot_cpu_data
.x86
>= 0x10 && !dhar_mem_hoist_valid(pvt
)) {
440 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
444 if (!dhar_valid(pvt
)) {
445 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
450 /* This node has Memory Hoisting */
452 /* +------------------+--------------------+--------------------+-----
453 * | memory | DRAM hole | relocated |
454 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
456 * | | | [0x100000000, |
457 * | | | (0x100000000+ |
458 * | | | (0xffffffff-x))] |
459 * +------------------+--------------------+--------------------+-----
461 * Above is a diagram of physical memory showing the DRAM hole and the
462 * relocated addresses from the DRAM hole. As shown, the DRAM hole
463 * starts at address x (the base address) and extends through address
464 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
465 * addresses in the hole so that they start at 0x100000000.
468 base
= dhar_base(pvt
);
471 *hole_size
= (0x1ull
<< 32) - base
;
473 if (boot_cpu_data
.x86
> 0xf)
474 *hole_offset
= f10_dhar_offset(pvt
);
476 *hole_offset
= k8_dhar_offset(pvt
);
478 edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
479 pvt
->mc_node_id
, (unsigned long)*hole_base
,
480 (unsigned long)*hole_offset
, (unsigned long)*hole_size
);
484 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info
);
487 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
488 * assumed that sys_addr maps to the node given by mci.
490 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
491 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
492 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
493 * then it is also involved in translating a SysAddr to a DramAddr. Sections
494 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
495 * These parts of the documentation are unclear. I interpret them as follows:
497 * When node n receives a SysAddr, it processes the SysAddr as follows:
499 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
500 * Limit registers for node n. If the SysAddr is not within the range
501 * specified by the base and limit values, then node n ignores the Sysaddr
502 * (since it does not map to node n). Otherwise continue to step 2 below.
504 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
505 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
506 * the range of relocated addresses (starting at 0x100000000) from the DRAM
507 * hole. If not, skip to step 3 below. Else get the value of the
508 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
509 * offset defined by this value from the SysAddr.
511 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
512 * Base register for node n. To obtain the DramAddr, subtract the base
513 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
515 static u64
sys_addr_to_dram_addr(struct mem_ctl_info
*mci
, u64 sys_addr
)
517 struct amd64_pvt
*pvt
= mci
->pvt_info
;
518 u64 dram_base
, hole_base
, hole_offset
, hole_size
, dram_addr
;
521 dram_base
= get_dram_base(pvt
, pvt
->mc_node_id
);
523 ret
= amd64_get_dram_hole_info(mci
, &hole_base
, &hole_offset
,
526 if ((sys_addr
>= (1ull << 32)) &&
527 (sys_addr
< ((1ull << 32) + hole_size
))) {
528 /* use DHAR to translate SysAddr to DramAddr */
529 dram_addr
= sys_addr
- hole_offset
;
531 edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
532 (unsigned long)sys_addr
,
533 (unsigned long)dram_addr
);
540 * Translate the SysAddr to a DramAddr as shown near the start of
541 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
542 * only deals with 40-bit values. Therefore we discard bits 63-40 of
543 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
544 * discard are all 1s. Otherwise the bits we discard are all 0s. See
545 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
546 * Programmer's Manual Volume 1 Application Programming.
548 dram_addr
= (sys_addr
& GENMASK(0, 39)) - dram_base
;
550 edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
551 (unsigned long)sys_addr
, (unsigned long)dram_addr
);
556 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
557 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
558 * for node interleaving.
560 static int num_node_interleave_bits(unsigned intlv_en
)
562 static const int intlv_shift_table
[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
565 BUG_ON(intlv_en
> 7);
566 n
= intlv_shift_table
[intlv_en
];
570 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
571 static u64
dram_addr_to_input_addr(struct mem_ctl_info
*mci
, u64 dram_addr
)
573 struct amd64_pvt
*pvt
;
580 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
581 * concerning translating a DramAddr to an InputAddr.
583 intlv_shift
= num_node_interleave_bits(dram_intlv_en(pvt
, 0));
584 input_addr
= ((dram_addr
>> intlv_shift
) & GENMASK(12, 35)) +
587 edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
588 intlv_shift
, (unsigned long)dram_addr
,
589 (unsigned long)input_addr
);
595 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
596 * assumed that @sys_addr maps to the node given by mci.
598 static u64
sys_addr_to_input_addr(struct mem_ctl_info
*mci
, u64 sys_addr
)
603 dram_addr_to_input_addr(mci
, sys_addr_to_dram_addr(mci
, sys_addr
));
605 edac_dbg(2, "SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
606 (unsigned long)sys_addr
, (unsigned long)input_addr
);
613 * @input_addr is an InputAddr associated with the node represented by mci.
614 * Translate @input_addr to a DramAddr and return the result.
616 static u64
input_addr_to_dram_addr(struct mem_ctl_info
*mci
, u64 input_addr
)
618 struct amd64_pvt
*pvt
;
619 unsigned node_id
, intlv_shift
;
624 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
625 * shows how to translate a DramAddr to an InputAddr. Here we reverse
626 * this procedure. When translating from a DramAddr to an InputAddr, the
627 * bits used for node interleaving are discarded. Here we recover these
628 * bits from the IntlvSel field of the DRAM Limit register (section
629 * 3.4.4.2) for the node that input_addr is associated with.
632 node_id
= pvt
->mc_node_id
;
636 intlv_shift
= num_node_interleave_bits(dram_intlv_en(pvt
, 0));
637 if (intlv_shift
== 0) {
638 edac_dbg(1, " InputAddr 0x%lx translates to DramAddr of same value\n",
639 (unsigned long)input_addr
);
644 bits
= ((input_addr
& GENMASK(12, 35)) << intlv_shift
) +
645 (input_addr
& 0xfff);
647 intlv_sel
= dram_intlv_sel(pvt
, node_id
) & ((1 << intlv_shift
) - 1);
648 dram_addr
= bits
+ (intlv_sel
<< 12);
650 edac_dbg(1, "InputAddr 0x%lx translates to DramAddr 0x%lx (%d node interleave bits)\n",
651 (unsigned long)input_addr
,
652 (unsigned long)dram_addr
, intlv_shift
);
658 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
659 * @dram_addr to a SysAddr.
661 static u64
dram_addr_to_sys_addr(struct mem_ctl_info
*mci
, u64 dram_addr
)
663 struct amd64_pvt
*pvt
= mci
->pvt_info
;
664 u64 hole_base
, hole_offset
, hole_size
, base
, sys_addr
;
667 ret
= amd64_get_dram_hole_info(mci
, &hole_base
, &hole_offset
,
670 if ((dram_addr
>= hole_base
) &&
671 (dram_addr
< (hole_base
+ hole_size
))) {
672 sys_addr
= dram_addr
+ hole_offset
;
674 edac_dbg(1, "using DHAR to translate DramAddr 0x%lx to SysAddr 0x%lx\n",
675 (unsigned long)dram_addr
,
676 (unsigned long)sys_addr
);
682 base
= get_dram_base(pvt
, pvt
->mc_node_id
);
683 sys_addr
= dram_addr
+ base
;
686 * The sys_addr we have computed up to this point is a 40-bit value
687 * because the k8 deals with 40-bit values. However, the value we are
688 * supposed to return is a full 64-bit physical address. The AMD
689 * x86-64 architecture specifies that the most significant implemented
690 * address bit through bit 63 of a physical address must be either all
691 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
692 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
693 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
696 sys_addr
|= ~((sys_addr
& (1ull << 39)) - 1);
698 edac_dbg(1, " Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
699 pvt
->mc_node_id
, (unsigned long)dram_addr
,
700 (unsigned long)sys_addr
);
706 * @input_addr is an InputAddr associated with the node given by mci. Translate
707 * @input_addr to a SysAddr.
709 static inline u64
input_addr_to_sys_addr(struct mem_ctl_info
*mci
,
712 return dram_addr_to_sys_addr(mci
,
713 input_addr_to_dram_addr(mci
, input_addr
));
716 /* Map the Error address to a PAGE and PAGE OFFSET. */
717 static inline void error_address_to_page_and_offset(u64 error_address
,
718 u32
*page
, u32
*offset
)
720 *page
= (u32
) (error_address
>> PAGE_SHIFT
);
721 *offset
= ((u32
) error_address
) & ~PAGE_MASK
;
725 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
726 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
727 * of a node that detected an ECC memory error. mci represents the node that
728 * the error address maps to (possibly different from the node that detected
729 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
732 static int sys_addr_to_csrow(struct mem_ctl_info
*mci
, u64 sys_addr
)
736 csrow
= input_addr_to_csrow(mci
, sys_addr_to_input_addr(mci
, sys_addr
));
739 amd64_mc_err(mci
, "Failed to translate InputAddr to csrow for "
740 "address 0x%lx\n", (unsigned long)sys_addr
);
744 static int get_channel_from_ecc_syndrome(struct mem_ctl_info
*, u16
);
747 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
750 static unsigned long amd64_determine_edac_cap(struct amd64_pvt
*pvt
)
753 unsigned long edac_cap
= EDAC_FLAG_NONE
;
755 bit
= (boot_cpu_data
.x86
> 0xf || pvt
->ext_model
>= K8_REV_F
)
759 if (pvt
->dclr0
& BIT(bit
))
760 edac_cap
= EDAC_FLAG_SECDED
;
765 static void amd64_debug_display_dimm_sizes(struct amd64_pvt
*, u8
);
767 static void amd64_dump_dramcfg_low(u32 dclr
, int chan
)
769 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan
, dclr
);
771 edac_dbg(1, " DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
772 (dclr
& BIT(16)) ? "un" : "",
773 (dclr
& BIT(19)) ? "yes" : "no");
775 edac_dbg(1, " PAR/ERR parity: %s\n",
776 (dclr
& BIT(8)) ? "enabled" : "disabled");
778 if (boot_cpu_data
.x86
== 0x10)
779 edac_dbg(1, " DCT 128bit mode width: %s\n",
780 (dclr
& BIT(11)) ? "128b" : "64b");
782 edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
783 (dclr
& BIT(12)) ? "yes" : "no",
784 (dclr
& BIT(13)) ? "yes" : "no",
785 (dclr
& BIT(14)) ? "yes" : "no",
786 (dclr
& BIT(15)) ? "yes" : "no");
789 /* Display and decode various NB registers for debug purposes. */
790 static void dump_misc_regs(struct amd64_pvt
*pvt
)
792 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt
->nbcap
);
794 edac_dbg(1, " NB two channel DRAM capable: %s\n",
795 (pvt
->nbcap
& NBCAP_DCT_DUAL
) ? "yes" : "no");
797 edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
798 (pvt
->nbcap
& NBCAP_SECDED
) ? "yes" : "no",
799 (pvt
->nbcap
& NBCAP_CHIPKILL
) ? "yes" : "no");
801 amd64_dump_dramcfg_low(pvt
->dclr0
, 0);
803 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt
->online_spare
);
805 edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
806 pvt
->dhar
, dhar_base(pvt
),
807 (boot_cpu_data
.x86
== 0xf) ? k8_dhar_offset(pvt
)
808 : f10_dhar_offset(pvt
));
810 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt
) ? "yes" : "no");
812 amd64_debug_display_dimm_sizes(pvt
, 0);
814 /* everything below this point is Fam10h and above */
815 if (boot_cpu_data
.x86
== 0xf)
818 amd64_debug_display_dimm_sizes(pvt
, 1);
820 amd64_info("using %s syndromes.\n", ((pvt
->ecc_sym_sz
== 8) ? "x8" : "x4"));
822 /* Only if NOT ganged does dclr1 have valid info */
823 if (!dct_ganging_enabled(pvt
))
824 amd64_dump_dramcfg_low(pvt
->dclr1
, 1);
828 * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
830 static void prep_chip_selects(struct amd64_pvt
*pvt
)
832 if (boot_cpu_data
.x86
== 0xf && pvt
->ext_model
< K8_REV_F
) {
833 pvt
->csels
[0].b_cnt
= pvt
->csels
[1].b_cnt
= 8;
834 pvt
->csels
[0].m_cnt
= pvt
->csels
[1].m_cnt
= 8;
836 pvt
->csels
[0].b_cnt
= pvt
->csels
[1].b_cnt
= 8;
837 pvt
->csels
[0].m_cnt
= pvt
->csels
[1].m_cnt
= 4;
842 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
844 static void read_dct_base_mask(struct amd64_pvt
*pvt
)
848 prep_chip_selects(pvt
);
850 for_each_chip_select(cs
, 0, pvt
) {
851 int reg0
= DCSB0
+ (cs
* 4);
852 int reg1
= DCSB1
+ (cs
* 4);
853 u32
*base0
= &pvt
->csels
[0].csbases
[cs
];
854 u32
*base1
= &pvt
->csels
[1].csbases
[cs
];
856 if (!amd64_read_dct_pci_cfg(pvt
, reg0
, base0
))
857 edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
860 if (boot_cpu_data
.x86
== 0xf || dct_ganging_enabled(pvt
))
863 if (!amd64_read_dct_pci_cfg(pvt
, reg1
, base1
))
864 edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
868 for_each_chip_select_mask(cs
, 0, pvt
) {
869 int reg0
= DCSM0
+ (cs
* 4);
870 int reg1
= DCSM1
+ (cs
* 4);
871 u32
*mask0
= &pvt
->csels
[0].csmasks
[cs
];
872 u32
*mask1
= &pvt
->csels
[1].csmasks
[cs
];
874 if (!amd64_read_dct_pci_cfg(pvt
, reg0
, mask0
))
875 edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
878 if (boot_cpu_data
.x86
== 0xf || dct_ganging_enabled(pvt
))
881 if (!amd64_read_dct_pci_cfg(pvt
, reg1
, mask1
))
882 edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
887 static enum mem_type
amd64_determine_memory_type(struct amd64_pvt
*pvt
, int cs
)
891 /* F15h supports only DDR3 */
892 if (boot_cpu_data
.x86
>= 0x15)
893 type
= (pvt
->dclr0
& BIT(16)) ? MEM_DDR3
: MEM_RDDR3
;
894 else if (boot_cpu_data
.x86
== 0x10 || pvt
->ext_model
>= K8_REV_F
) {
895 if (pvt
->dchr0
& DDR3_MODE
)
896 type
= (pvt
->dclr0
& BIT(16)) ? MEM_DDR3
: MEM_RDDR3
;
898 type
= (pvt
->dclr0
& BIT(16)) ? MEM_DDR2
: MEM_RDDR2
;
900 type
= (pvt
->dclr0
& BIT(18)) ? MEM_DDR
: MEM_RDDR
;
903 amd64_info("CS%d: %s\n", cs
, edac_mem_types
[type
]);
908 /* Get the number of DCT channels the memory controller is using. */
909 static int k8_early_channel_count(struct amd64_pvt
*pvt
)
913 if (pvt
->ext_model
>= K8_REV_F
)
914 /* RevF (NPT) and later */
915 flag
= pvt
->dclr0
& WIDTH_128
;
917 /* RevE and earlier */
918 flag
= pvt
->dclr0
& REVE_WIDTH_128
;
923 return (flag
) ? 2 : 1;
926 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
927 static u64
get_error_address(struct mce
*m
)
929 struct cpuinfo_x86
*c
= &boot_cpu_data
;
939 addr
= m
->addr
& GENMASK(start_bit
, end_bit
);
942 * Erratum 637 workaround
944 if (c
->x86
== 0x15) {
945 struct amd64_pvt
*pvt
;
946 u64 cc6_base
, tmp_addr
;
948 u8 mce_nid
, intlv_en
;
950 if ((addr
& GENMASK(24, 47)) >> 24 != 0x00fdf7)
953 mce_nid
= amd_get_nb_id(m
->extcpu
);
954 pvt
= mcis
[mce_nid
]->pvt_info
;
956 amd64_read_pci_cfg(pvt
->F1
, DRAM_LOCAL_NODE_LIM
, &tmp
);
957 intlv_en
= tmp
>> 21 & 0x7;
959 /* add [47:27] + 3 trailing bits */
960 cc6_base
= (tmp
& GENMASK(0, 20)) << 3;
962 /* reverse and add DramIntlvEn */
963 cc6_base
|= intlv_en
^ 0x7;
969 return cc6_base
| (addr
& GENMASK(0, 23));
971 amd64_read_pci_cfg(pvt
->F1
, DRAM_LOCAL_NODE_BASE
, &tmp
);
974 tmp_addr
= (addr
& GENMASK(12, 23)) << __fls(intlv_en
+ 1);
976 /* OR DramIntlvSel into bits [14:12] */
977 tmp_addr
|= (tmp
& GENMASK(21, 23)) >> 9;
979 /* add remaining [11:0] bits from original MC4_ADDR */
980 tmp_addr
|= addr
& GENMASK(0, 11);
982 return cc6_base
| tmp_addr
;
988 static void read_dram_base_limit_regs(struct amd64_pvt
*pvt
, unsigned range
)
990 struct cpuinfo_x86
*c
= &boot_cpu_data
;
991 int off
= range
<< 3;
993 amd64_read_pci_cfg(pvt
->F1
, DRAM_BASE_LO
+ off
, &pvt
->ranges
[range
].base
.lo
);
994 amd64_read_pci_cfg(pvt
->F1
, DRAM_LIMIT_LO
+ off
, &pvt
->ranges
[range
].lim
.lo
);
999 if (!dram_rw(pvt
, range
))
1002 amd64_read_pci_cfg(pvt
->F1
, DRAM_BASE_HI
+ off
, &pvt
->ranges
[range
].base
.hi
);
1003 amd64_read_pci_cfg(pvt
->F1
, DRAM_LIMIT_HI
+ off
, &pvt
->ranges
[range
].lim
.hi
);
1005 /* Factor in CC6 save area by reading dst node's limit reg */
1006 if (c
->x86
== 0x15) {
1007 struct pci_dev
*f1
= NULL
;
1008 u8 nid
= dram_dst_node(pvt
, range
);
1011 f1
= pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid
, 1));
1015 amd64_read_pci_cfg(f1
, DRAM_LOCAL_NODE_LIM
, &llim
);
1017 pvt
->ranges
[range
].lim
.lo
&= GENMASK(0, 15);
1019 /* {[39:27],111b} */
1020 pvt
->ranges
[range
].lim
.lo
|= ((llim
& 0x1fff) << 3 | 0x7) << 16;
1022 pvt
->ranges
[range
].lim
.hi
&= GENMASK(0, 7);
1025 pvt
->ranges
[range
].lim
.hi
|= llim
>> 13;
1031 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info
*mci
, u64 sys_addr
,
1034 struct mem_ctl_info
*src_mci
;
1035 struct amd64_pvt
*pvt
= mci
->pvt_info
;
1039 error_address_to_page_and_offset(sys_addr
, &page
, &offset
);
1042 * Find out which node the error address belongs to. This may be
1043 * different from the node that detected the error.
1045 src_mci
= find_mc_by_sys_addr(mci
, sys_addr
);
1047 amd64_mc_err(mci
, "failed to map error addr 0x%lx to a node\n",
1048 (unsigned long)sys_addr
);
1049 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED
, mci
,
1050 page
, offset
, syndrome
,
1052 "failed to map error addr to a node",
1058 /* Now map the sys_addr to a CSROW */
1059 csrow
= sys_addr_to_csrow(src_mci
, sys_addr
);
1061 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED
, mci
,
1062 page
, offset
, syndrome
,
1064 "failed to map error addr to a csrow",
1070 /* CHIPKILL enabled */
1071 if (pvt
->nbcfg
& NBCFG_CHIPKILL
) {
1072 channel
= get_channel_from_ecc_syndrome(mci
, syndrome
);
1075 * Syndrome didn't map, so we don't know which of the
1076 * 2 DIMMs is in error. So we need to ID 'both' of them
1079 amd64_mc_warn(src_mci
, "unknown syndrome 0x%04x - "
1080 "possible error reporting race\n",
1082 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED
, mci
,
1083 page
, offset
, syndrome
,
1085 "unknown syndrome - possible error reporting race",
1092 * non-chipkill ecc mode
1094 * The k8 documentation is unclear about how to determine the
1095 * channel number when using non-chipkill memory. This method
1096 * was obtained from email communication with someone at AMD.
1097 * (Wish the email was placed in this comment - norsk)
1099 channel
= ((sys_addr
& BIT(3)) != 0);
1102 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED
, src_mci
,
1103 page
, offset
, syndrome
,
1108 static int ddr2_cs_size(unsigned i
, bool dct_width
)
1114 else if (!(i
& 0x1))
1117 shift
= (i
+ 1) >> 1;
1119 return 128 << (shift
+ !!dct_width
);
1122 static int k8_dbam_to_chip_select(struct amd64_pvt
*pvt
, u8 dct
,
1125 u32 dclr
= dct
? pvt
->dclr1
: pvt
->dclr0
;
1127 if (pvt
->ext_model
>= K8_REV_F
) {
1128 WARN_ON(cs_mode
> 11);
1129 return ddr2_cs_size(cs_mode
, dclr
& WIDTH_128
);
1131 else if (pvt
->ext_model
>= K8_REV_D
) {
1133 WARN_ON(cs_mode
> 10);
1136 * the below calculation, besides trying to win an obfuscated C
1137 * contest, maps cs_mode values to DIMM chip select sizes. The
1140 * cs_mode CS size (mb)
1141 * ======= ============
1154 * Basically, it calculates a value with which to shift the
1155 * smallest CS size of 32MB.
1157 * ddr[23]_cs_size have a similar purpose.
1159 diff
= cs_mode
/3 + (unsigned)(cs_mode
> 5);
1161 return 32 << (cs_mode
- diff
);
1164 WARN_ON(cs_mode
> 6);
1165 return 32 << cs_mode
;
1170 * Get the number of DCT channels in use.
1173 * number of Memory Channels in operation
1175 * contents of the DCL0_LOW register
1177 static int f1x_early_channel_count(struct amd64_pvt
*pvt
)
1179 int i
, j
, channels
= 0;
1181 /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1182 if (boot_cpu_data
.x86
== 0x10 && (pvt
->dclr0
& WIDTH_128
))
1186 * Need to check if in unganged mode: In such, there are 2 channels,
1187 * but they are not in 128 bit mode and thus the above 'dclr0' status
1190 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1191 * their CSEnable bit on. If so, then SINGLE DIMM case.
1193 edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1196 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1197 * is more than just one DIMM present in unganged mode. Need to check
1198 * both controllers since DIMMs can be placed in either one.
1200 for (i
= 0; i
< 2; i
++) {
1201 u32 dbam
= (i
? pvt
->dbam1
: pvt
->dbam0
);
1203 for (j
= 0; j
< 4; j
++) {
1204 if (DBAM_DIMM(j
, dbam
) > 0) {
1214 amd64_info("MCT channel count: %d\n", channels
);
1219 static int ddr3_cs_size(unsigned i
, bool dct_width
)
1224 if (i
== 0 || i
== 3 || i
== 4)
1230 else if (!(i
& 0x1))
1233 shift
= (i
+ 1) >> 1;
1236 cs_size
= (128 * (1 << !!dct_width
)) << shift
;
1241 static int f10_dbam_to_chip_select(struct amd64_pvt
*pvt
, u8 dct
,
1244 u32 dclr
= dct
? pvt
->dclr1
: pvt
->dclr0
;
1246 WARN_ON(cs_mode
> 11);
1248 if (pvt
->dchr0
& DDR3_MODE
|| pvt
->dchr1
& DDR3_MODE
)
1249 return ddr3_cs_size(cs_mode
, dclr
& WIDTH_128
);
1251 return ddr2_cs_size(cs_mode
, dclr
& WIDTH_128
);
1255 * F15h supports only 64bit DCT interfaces
1257 static int f15_dbam_to_chip_select(struct amd64_pvt
*pvt
, u8 dct
,
1260 WARN_ON(cs_mode
> 12);
1262 return ddr3_cs_size(cs_mode
, false);
1265 static void read_dram_ctl_register(struct amd64_pvt
*pvt
)
1268 if (boot_cpu_data
.x86
== 0xf)
1271 if (!amd64_read_dct_pci_cfg(pvt
, DCT_SEL_LO
, &pvt
->dct_sel_lo
)) {
1272 edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1273 pvt
->dct_sel_lo
, dct_sel_baseaddr(pvt
));
1275 edac_dbg(0, " DCTs operate in %s mode\n",
1276 (dct_ganging_enabled(pvt
) ? "ganged" : "unganged"));
1278 if (!dct_ganging_enabled(pvt
))
1279 edac_dbg(0, " Address range split per DCT: %s\n",
1280 (dct_high_range_enabled(pvt
) ? "yes" : "no"));
1282 edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1283 (dct_data_intlv_enabled(pvt
) ? "enabled" : "disabled"),
1284 (dct_memory_cleared(pvt
) ? "yes" : "no"));
1286 edac_dbg(0, " channel interleave: %s, "
1287 "interleave bits selector: 0x%x\n",
1288 (dct_interleave_enabled(pvt
) ? "enabled" : "disabled"),
1289 dct_sel_interleave_addr(pvt
));
1292 amd64_read_dct_pci_cfg(pvt
, DCT_SEL_HI
, &pvt
->dct_sel_hi
);
1296 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1297 * Interleaving Modes.
1299 static u8
f1x_determine_channel(struct amd64_pvt
*pvt
, u64 sys_addr
,
1300 bool hi_range_sel
, u8 intlv_en
)
1302 u8 dct_sel_high
= (pvt
->dct_sel_lo
>> 1) & 1;
1304 if (dct_ganging_enabled(pvt
))
1308 return dct_sel_high
;
1311 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1313 if (dct_interleave_enabled(pvt
)) {
1314 u8 intlv_addr
= dct_sel_interleave_addr(pvt
);
1316 /* return DCT select function: 0=DCT0, 1=DCT1 */
1318 return sys_addr
>> 6 & 1;
1320 if (intlv_addr
& 0x2) {
1321 u8 shift
= intlv_addr
& 0x1 ? 9 : 6;
1322 u32 temp
= hweight_long((u32
) ((sys_addr
>> 16) & 0x1F)) % 2;
1324 return ((sys_addr
>> shift
) & 1) ^ temp
;
1327 return (sys_addr
>> (12 + hweight8(intlv_en
))) & 1;
1330 if (dct_high_range_enabled(pvt
))
1331 return ~dct_sel_high
& 1;
1336 /* Convert the sys_addr to the normalized DCT address */
1337 static u64
f1x_get_norm_dct_addr(struct amd64_pvt
*pvt
, unsigned range
,
1338 u64 sys_addr
, bool hi_rng
,
1339 u32 dct_sel_base_addr
)
1342 u64 dram_base
= get_dram_base(pvt
, range
);
1343 u64 hole_off
= f10_dhar_offset(pvt
);
1344 u64 dct_sel_base_off
= (pvt
->dct_sel_hi
& 0xFFFFFC00) << 16;
1349 * base address of high range is below 4Gb
1350 * (bits [47:27] at [31:11])
1351 * DRAM address space on this DCT is hoisted above 4Gb &&
1354 * remove hole offset from sys_addr
1356 * remove high range offset from sys_addr
1358 if ((!(dct_sel_base_addr
>> 16) ||
1359 dct_sel_base_addr
< dhar_base(pvt
)) &&
1361 (sys_addr
>= BIT_64(32)))
1362 chan_off
= hole_off
;
1364 chan_off
= dct_sel_base_off
;
1368 * we have a valid hole &&
1373 * remove dram base to normalize to DCT address
1375 if (dhar_valid(pvt
) && (sys_addr
>= BIT_64(32)))
1376 chan_off
= hole_off
;
1378 chan_off
= dram_base
;
1381 return (sys_addr
& GENMASK(6,47)) - (chan_off
& GENMASK(23,47));
1385 * checks if the csrow passed in is marked as SPARED, if so returns the new
1388 static int f10_process_possible_spare(struct amd64_pvt
*pvt
, u8 dct
, int csrow
)
1392 if (online_spare_swap_done(pvt
, dct
) &&
1393 csrow
== online_spare_bad_dramcs(pvt
, dct
)) {
1395 for_each_chip_select(tmp_cs
, dct
, pvt
) {
1396 if (chip_select_base(tmp_cs
, dct
, pvt
) & 0x2) {
1406 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1407 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1410 * -EINVAL: NOT FOUND
1411 * 0..csrow = Chip-Select Row
1413 static int f1x_lookup_addr_in_dct(u64 in_addr
, u32 nid
, u8 dct
)
1415 struct mem_ctl_info
*mci
;
1416 struct amd64_pvt
*pvt
;
1417 u64 cs_base
, cs_mask
;
1418 int cs_found
= -EINVAL
;
1425 pvt
= mci
->pvt_info
;
1427 edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr
, dct
);
1429 for_each_chip_select(csrow
, dct
, pvt
) {
1430 if (!csrow_enabled(csrow
, dct
, pvt
))
1433 get_cs_base_and_mask(pvt
, csrow
, dct
, &cs_base
, &cs_mask
);
1435 edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1436 csrow
, cs_base
, cs_mask
);
1440 edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1441 (in_addr
& cs_mask
), (cs_base
& cs_mask
));
1443 if ((in_addr
& cs_mask
) == (cs_base
& cs_mask
)) {
1444 cs_found
= f10_process_possible_spare(pvt
, dct
, csrow
);
1446 edac_dbg(1, " MATCH csrow=%d\n", cs_found
);
1454 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
1455 * swapped with a region located at the bottom of memory so that the GPU can use
1456 * the interleaved region and thus two channels.
1458 static u64
f1x_swap_interleaved_region(struct amd64_pvt
*pvt
, u64 sys_addr
)
1460 u32 swap_reg
, swap_base
, swap_limit
, rgn_size
, tmp_addr
;
1462 if (boot_cpu_data
.x86
== 0x10) {
1463 /* only revC3 and revE have that feature */
1464 if (boot_cpu_data
.x86_model
< 4 ||
1465 (boot_cpu_data
.x86_model
< 0xa &&
1466 boot_cpu_data
.x86_mask
< 3))
1470 amd64_read_dct_pci_cfg(pvt
, SWAP_INTLV_REG
, &swap_reg
);
1472 if (!(swap_reg
& 0x1))
1475 swap_base
= (swap_reg
>> 3) & 0x7f;
1476 swap_limit
= (swap_reg
>> 11) & 0x7f;
1477 rgn_size
= (swap_reg
>> 20) & 0x7f;
1478 tmp_addr
= sys_addr
>> 27;
1480 if (!(sys_addr
>> 34) &&
1481 (((tmp_addr
>= swap_base
) &&
1482 (tmp_addr
<= swap_limit
)) ||
1483 (tmp_addr
< rgn_size
)))
1484 return sys_addr
^ (u64
)swap_base
<< 27;
1489 /* For a given @dram_range, check if @sys_addr falls within it. */
1490 static int f1x_match_to_this_node(struct amd64_pvt
*pvt
, unsigned range
,
1491 u64 sys_addr
, int *nid
, int *chan_sel
)
1493 int cs_found
= -EINVAL
;
1497 bool high_range
= false;
1499 u8 node_id
= dram_dst_node(pvt
, range
);
1500 u8 intlv_en
= dram_intlv_en(pvt
, range
);
1501 u32 intlv_sel
= dram_intlv_sel(pvt
, range
);
1503 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1504 range
, sys_addr
, get_dram_limit(pvt
, range
));
1506 if (dhar_valid(pvt
) &&
1507 dhar_base(pvt
) <= sys_addr
&&
1508 sys_addr
< BIT_64(32)) {
1509 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1514 if (intlv_en
&& (intlv_sel
!= ((sys_addr
>> 12) & intlv_en
)))
1517 sys_addr
= f1x_swap_interleaved_region(pvt
, sys_addr
);
1519 dct_sel_base
= dct_sel_baseaddr(pvt
);
1522 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1523 * select between DCT0 and DCT1.
1525 if (dct_high_range_enabled(pvt
) &&
1526 !dct_ganging_enabled(pvt
) &&
1527 ((sys_addr
>> 27) >= (dct_sel_base
>> 11)))
1530 channel
= f1x_determine_channel(pvt
, sys_addr
, high_range
, intlv_en
);
1532 chan_addr
= f1x_get_norm_dct_addr(pvt
, range
, sys_addr
,
1533 high_range
, dct_sel_base
);
1535 /* Remove node interleaving, see F1x120 */
1537 chan_addr
= ((chan_addr
>> (12 + hweight8(intlv_en
))) << 12) |
1538 (chan_addr
& 0xfff);
1540 /* remove channel interleave */
1541 if (dct_interleave_enabled(pvt
) &&
1542 !dct_high_range_enabled(pvt
) &&
1543 !dct_ganging_enabled(pvt
)) {
1545 if (dct_sel_interleave_addr(pvt
) != 1) {
1546 if (dct_sel_interleave_addr(pvt
) == 0x3)
1548 chan_addr
= ((chan_addr
>> 10) << 9) |
1549 (chan_addr
& 0x1ff);
1551 /* A[6] or hash 6 */
1552 chan_addr
= ((chan_addr
>> 7) << 6) |
1556 chan_addr
= ((chan_addr
>> 13) << 12) |
1557 (chan_addr
& 0xfff);
1560 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr
);
1562 cs_found
= f1x_lookup_addr_in_dct(chan_addr
, node_id
, channel
);
1564 if (cs_found
>= 0) {
1566 *chan_sel
= channel
;
1571 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt
*pvt
, u64 sys_addr
,
1572 int *node
, int *chan_sel
)
1574 int cs_found
= -EINVAL
;
1577 for (range
= 0; range
< DRAM_RANGES
; range
++) {
1579 if (!dram_rw(pvt
, range
))
1582 if ((get_dram_base(pvt
, range
) <= sys_addr
) &&
1583 (get_dram_limit(pvt
, range
) >= sys_addr
)) {
1585 cs_found
= f1x_match_to_this_node(pvt
, range
,
1596 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1597 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1599 * The @sys_addr is usually an error address received from the hardware
1602 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info
*mci
, u64 sys_addr
,
1605 struct amd64_pvt
*pvt
= mci
->pvt_info
;
1607 int nid
, csrow
, chan
= 0;
1609 error_address_to_page_and_offset(sys_addr
, &page
, &offset
);
1611 csrow
= f1x_translate_sysaddr_to_cs(pvt
, sys_addr
, &nid
, &chan
);
1614 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED
, mci
,
1615 page
, offset
, syndrome
,
1617 "failed to map error addr to a csrow",
1624 * We need the syndromes for channel detection only when we're
1625 * ganged. Otherwise @chan should already contain the channel at
1628 if (dct_ganging_enabled(pvt
))
1629 chan
= get_channel_from_ecc_syndrome(mci
, syndrome
);
1631 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED
, mci
,
1632 page
, offset
, syndrome
,
1638 * debug routine to display the memory sizes of all logical DIMMs and its
1641 static void amd64_debug_display_dimm_sizes(struct amd64_pvt
*pvt
, u8 ctrl
)
1643 int dimm
, size0
, size1
, factor
= 0;
1644 u32
*dcsb
= ctrl
? pvt
->csels
[1].csbases
: pvt
->csels
[0].csbases
;
1645 u32 dbam
= ctrl
? pvt
->dbam1
: pvt
->dbam0
;
1647 if (boot_cpu_data
.x86
== 0xf) {
1648 if (pvt
->dclr0
& WIDTH_128
)
1651 /* K8 families < revF not supported yet */
1652 if (pvt
->ext_model
< K8_REV_F
)
1658 dbam
= (ctrl
&& !dct_ganging_enabled(pvt
)) ? pvt
->dbam1
: pvt
->dbam0
;
1659 dcsb
= (ctrl
&& !dct_ganging_enabled(pvt
)) ? pvt
->csels
[1].csbases
1660 : pvt
->csels
[0].csbases
;
1662 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1665 edac_printk(KERN_DEBUG
, EDAC_MC
, "DCT%d chip selects:\n", ctrl
);
1667 /* Dump memory sizes for DIMM and its CSROWs */
1668 for (dimm
= 0; dimm
< 4; dimm
++) {
1671 if (dcsb
[dimm
*2] & DCSB_CS_ENABLE
)
1672 size0
= pvt
->ops
->dbam_to_cs(pvt
, ctrl
,
1673 DBAM_DIMM(dimm
, dbam
));
1676 if (dcsb
[dimm
*2 + 1] & DCSB_CS_ENABLE
)
1677 size1
= pvt
->ops
->dbam_to_cs(pvt
, ctrl
,
1678 DBAM_DIMM(dimm
, dbam
));
1680 amd64_info(EDAC_MC
": %d: %5dMB %d: %5dMB\n",
1681 dimm
* 2, size0
<< factor
,
1682 dimm
* 2 + 1, size1
<< factor
);
1686 static struct amd64_family_type amd64_family_types
[] = {
1689 .f1_id
= PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP
,
1690 .f3_id
= PCI_DEVICE_ID_AMD_K8_NB_MISC
,
1692 .early_channel_count
= k8_early_channel_count
,
1693 .map_sysaddr_to_csrow
= k8_map_sysaddr_to_csrow
,
1694 .dbam_to_cs
= k8_dbam_to_chip_select
,
1695 .read_dct_pci_cfg
= k8_read_dct_pci_cfg
,
1700 .f1_id
= PCI_DEVICE_ID_AMD_10H_NB_MAP
,
1701 .f3_id
= PCI_DEVICE_ID_AMD_10H_NB_MISC
,
1703 .early_channel_count
= f1x_early_channel_count
,
1704 .map_sysaddr_to_csrow
= f1x_map_sysaddr_to_csrow
,
1705 .dbam_to_cs
= f10_dbam_to_chip_select
,
1706 .read_dct_pci_cfg
= f10_read_dct_pci_cfg
,
1711 .f1_id
= PCI_DEVICE_ID_AMD_15H_NB_F1
,
1712 .f3_id
= PCI_DEVICE_ID_AMD_15H_NB_F3
,
1714 .early_channel_count
= f1x_early_channel_count
,
1715 .map_sysaddr_to_csrow
= f1x_map_sysaddr_to_csrow
,
1716 .dbam_to_cs
= f15_dbam_to_chip_select
,
1717 .read_dct_pci_cfg
= f15_read_dct_pci_cfg
,
1722 static struct pci_dev
*pci_get_related_function(unsigned int vendor
,
1723 unsigned int device
,
1724 struct pci_dev
*related
)
1726 struct pci_dev
*dev
= NULL
;
1728 dev
= pci_get_device(vendor
, device
, dev
);
1730 if ((dev
->bus
->number
== related
->bus
->number
) &&
1731 (PCI_SLOT(dev
->devfn
) == PCI_SLOT(related
->devfn
)))
1733 dev
= pci_get_device(vendor
, device
, dev
);
1740 * These are tables of eigenvectors (one per line) which can be used for the
1741 * construction of the syndrome tables. The modified syndrome search algorithm
1742 * uses those to find the symbol in error and thus the DIMM.
1744 * Algorithm courtesy of Ross LaFetra from AMD.
1746 static u16 x4_vectors
[] = {
1747 0x2f57, 0x1afe, 0x66cc, 0xdd88,
1748 0x11eb, 0x3396, 0x7f4c, 0xeac8,
1749 0x0001, 0x0002, 0x0004, 0x0008,
1750 0x1013, 0x3032, 0x4044, 0x8088,
1751 0x106b, 0x30d6, 0x70fc, 0xe0a8,
1752 0x4857, 0xc4fe, 0x13cc, 0x3288,
1753 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1754 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1755 0x15c1, 0x2a42, 0x89ac, 0x4758,
1756 0x2b03, 0x1602, 0x4f0c, 0xca08,
1757 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1758 0x8ba7, 0x465e, 0x244c, 0x1cc8,
1759 0x2b87, 0x164e, 0x642c, 0xdc18,
1760 0x40b9, 0x80de, 0x1094, 0x20e8,
1761 0x27db, 0x1eb6, 0x9dac, 0x7b58,
1762 0x11c1, 0x2242, 0x84ac, 0x4c58,
1763 0x1be5, 0x2d7a, 0x5e34, 0xa718,
1764 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1765 0x4c97, 0xc87e, 0x11fc, 0x33a8,
1766 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1767 0x16b3, 0x3d62, 0x4f34, 0x8518,
1768 0x1e2f, 0x391a, 0x5cac, 0xf858,
1769 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1770 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1771 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1772 0x4397, 0xc27e, 0x17fc, 0x3ea8,
1773 0x1617, 0x3d3e, 0x6464, 0xb8b8,
1774 0x23ff, 0x12aa, 0xab6c, 0x56d8,
1775 0x2dfb, 0x1ba6, 0x913c, 0x7328,
1776 0x185d, 0x2ca6, 0x7914, 0x9e28,
1777 0x171b, 0x3e36, 0x7d7c, 0xebe8,
1778 0x4199, 0x82ee, 0x19f4, 0x2e58,
1779 0x4807, 0xc40e, 0x130c, 0x3208,
1780 0x1905, 0x2e0a, 0x5804, 0xac08,
1781 0x213f, 0x132a, 0xadfc, 0x5ba8,
1782 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1785 static u16 x8_vectors
[] = {
1786 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1787 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1788 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1789 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1790 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1791 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1792 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1793 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1794 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1795 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1796 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1797 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1798 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1799 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1800 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1801 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1802 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1803 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1804 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
1807 static int decode_syndrome(u16 syndrome
, u16
*vectors
, unsigned num_vecs
,
1810 unsigned int i
, err_sym
;
1812 for (err_sym
= 0; err_sym
< num_vecs
/ v_dim
; err_sym
++) {
1814 unsigned v_idx
= err_sym
* v_dim
;
1815 unsigned v_end
= (err_sym
+ 1) * v_dim
;
1817 /* walk over all 16 bits of the syndrome */
1818 for (i
= 1; i
< (1U << 16); i
<<= 1) {
1820 /* if bit is set in that eigenvector... */
1821 if (v_idx
< v_end
&& vectors
[v_idx
] & i
) {
1822 u16 ev_comp
= vectors
[v_idx
++];
1824 /* ... and bit set in the modified syndrome, */
1834 /* can't get to zero, move to next symbol */
1839 edac_dbg(0, "syndrome(%x) not found\n", syndrome
);
1843 static int map_err_sym_to_channel(int err_sym
, int sym_size
)
1856 return err_sym
>> 4;
1862 /* imaginary bits not in a DIMM */
1864 WARN(1, KERN_ERR
"Invalid error symbol: 0x%x\n",
1876 return err_sym
>> 3;
1882 static int get_channel_from_ecc_syndrome(struct mem_ctl_info
*mci
, u16 syndrome
)
1884 struct amd64_pvt
*pvt
= mci
->pvt_info
;
1887 if (pvt
->ecc_sym_sz
== 8)
1888 err_sym
= decode_syndrome(syndrome
, x8_vectors
,
1889 ARRAY_SIZE(x8_vectors
),
1891 else if (pvt
->ecc_sym_sz
== 4)
1892 err_sym
= decode_syndrome(syndrome
, x4_vectors
,
1893 ARRAY_SIZE(x4_vectors
),
1896 amd64_warn("Illegal syndrome type: %u\n", pvt
->ecc_sym_sz
);
1900 return map_err_sym_to_channel(err_sym
, pvt
->ecc_sym_sz
);
1904 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
1905 * ADDRESS and process.
1907 static void amd64_handle_ce(struct mem_ctl_info
*mci
, struct mce
*m
)
1909 struct amd64_pvt
*pvt
= mci
->pvt_info
;
1913 /* Ensure that the Error Address is VALID */
1914 if (!(m
->status
& MCI_STATUS_ADDRV
)) {
1915 amd64_mc_err(mci
, "HW has no ERROR_ADDRESS available\n");
1916 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED
, mci
,
1919 "HW has no ERROR_ADDRESS available",
1925 sys_addr
= get_error_address(m
);
1926 syndrome
= extract_syndrome(m
->status
);
1928 amd64_mc_err(mci
, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr
);
1930 pvt
->ops
->map_sysaddr_to_csrow(mci
, sys_addr
, syndrome
);
1933 /* Handle any Un-correctable Errors (UEs) */
1934 static void amd64_handle_ue(struct mem_ctl_info
*mci
, struct mce
*m
)
1936 struct mem_ctl_info
*log_mci
, *src_mci
= NULL
;
1943 if (!(m
->status
& MCI_STATUS_ADDRV
)) {
1944 amd64_mc_err(mci
, "HW has no ERROR_ADDRESS available\n");
1945 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED
, mci
,
1948 "HW has no ERROR_ADDRESS available",
1954 sys_addr
= get_error_address(m
);
1955 error_address_to_page_and_offset(sys_addr
, &page
, &offset
);
1958 * Find out which node the error address belongs to. This may be
1959 * different from the node that detected the error.
1961 src_mci
= find_mc_by_sys_addr(mci
, sys_addr
);
1963 amd64_mc_err(mci
, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
1964 (unsigned long)sys_addr
);
1965 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED
, mci
,
1968 "ERROR ADDRESS NOT mapped to a MC",
1976 csrow
= sys_addr_to_csrow(log_mci
, sys_addr
);
1978 amd64_mc_err(mci
, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
1979 (unsigned long)sys_addr
);
1980 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED
, mci
,
1983 "ERROR ADDRESS NOT mapped to CS",
1987 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED
, mci
,
1994 static inline void __amd64_decode_bus_error(struct mem_ctl_info
*mci
,
1997 u16 ec
= EC(m
->status
);
1998 u8 xec
= XEC(m
->status
, 0x1f);
1999 u8 ecc_type
= (m
->status
>> 45) & 0x3;
2001 /* Bail early out if this was an 'observed' error */
2002 if (PP(ec
) == NBSL_PP_OBS
)
2005 /* Do only ECC errors */
2006 if (xec
&& xec
!= F10_NBSL_EXT_ERR_ECC
)
2010 amd64_handle_ce(mci
, m
);
2011 else if (ecc_type
== 1)
2012 amd64_handle_ue(mci
, m
);
2015 void amd64_decode_bus_error(int node_id
, struct mce
*m
)
2017 __amd64_decode_bus_error(mcis
[node_id
], m
);
2021 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
2022 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
2024 static int reserve_mc_sibling_devs(struct amd64_pvt
*pvt
, u16 f1_id
, u16 f3_id
)
2026 /* Reserve the ADDRESS MAP Device */
2027 pvt
->F1
= pci_get_related_function(pvt
->F2
->vendor
, f1_id
, pvt
->F2
);
2029 amd64_err("error address map device not found: "
2030 "vendor %x device 0x%x (broken BIOS?)\n",
2031 PCI_VENDOR_ID_AMD
, f1_id
);
2035 /* Reserve the MISC Device */
2036 pvt
->F3
= pci_get_related_function(pvt
->F2
->vendor
, f3_id
, pvt
->F2
);
2038 pci_dev_put(pvt
->F1
);
2041 amd64_err("error F3 device not found: "
2042 "vendor %x device 0x%x (broken BIOS?)\n",
2043 PCI_VENDOR_ID_AMD
, f3_id
);
2047 edac_dbg(1, "F1: %s\n", pci_name(pvt
->F1
));
2048 edac_dbg(1, "F2: %s\n", pci_name(pvt
->F2
));
2049 edac_dbg(1, "F3: %s\n", pci_name(pvt
->F3
));
2054 static void free_mc_sibling_devs(struct amd64_pvt
*pvt
)
2056 pci_dev_put(pvt
->F1
);
2057 pci_dev_put(pvt
->F3
);
2061 * Retrieve the hardware registers of the memory controller (this includes the
2062 * 'Address Map' and 'Misc' device regs)
2064 static void read_mc_regs(struct amd64_pvt
*pvt
)
2066 struct cpuinfo_x86
*c
= &boot_cpu_data
;
2072 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2073 * those are Read-As-Zero
2075 rdmsrl(MSR_K8_TOP_MEM1
, pvt
->top_mem
);
2076 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt
->top_mem
);
2078 /* check first whether TOP_MEM2 is enabled */
2079 rdmsrl(MSR_K8_SYSCFG
, msr_val
);
2080 if (msr_val
& (1U << 21)) {
2081 rdmsrl(MSR_K8_TOP_MEM2
, pvt
->top_mem2
);
2082 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt
->top_mem2
);
2084 edac_dbg(0, " TOP_MEM2 disabled\n");
2086 amd64_read_pci_cfg(pvt
->F3
, NBCAP
, &pvt
->nbcap
);
2088 read_dram_ctl_register(pvt
);
2090 for (range
= 0; range
< DRAM_RANGES
; range
++) {
2093 /* read settings for this DRAM range */
2094 read_dram_base_limit_regs(pvt
, range
);
2096 rw
= dram_rw(pvt
, range
);
2100 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2102 get_dram_base(pvt
, range
),
2103 get_dram_limit(pvt
, range
));
2105 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2106 dram_intlv_en(pvt
, range
) ? "Enabled" : "Disabled",
2107 (rw
& 0x1) ? "R" : "-",
2108 (rw
& 0x2) ? "W" : "-",
2109 dram_intlv_sel(pvt
, range
),
2110 dram_dst_node(pvt
, range
));
2113 read_dct_base_mask(pvt
);
2115 amd64_read_pci_cfg(pvt
->F1
, DHAR
, &pvt
->dhar
);
2116 amd64_read_dct_pci_cfg(pvt
, DBAM0
, &pvt
->dbam0
);
2118 amd64_read_pci_cfg(pvt
->F3
, F10_ONLINE_SPARE
, &pvt
->online_spare
);
2120 amd64_read_dct_pci_cfg(pvt
, DCLR0
, &pvt
->dclr0
);
2121 amd64_read_dct_pci_cfg(pvt
, DCHR0
, &pvt
->dchr0
);
2123 if (!dct_ganging_enabled(pvt
)) {
2124 amd64_read_dct_pci_cfg(pvt
, DCLR1
, &pvt
->dclr1
);
2125 amd64_read_dct_pci_cfg(pvt
, DCHR1
, &pvt
->dchr1
);
2128 pvt
->ecc_sym_sz
= 4;
2130 if (c
->x86
>= 0x10) {
2131 amd64_read_pci_cfg(pvt
->F3
, EXT_NB_MCA_CFG
, &tmp
);
2132 amd64_read_dct_pci_cfg(pvt
, DBAM1
, &pvt
->dbam1
);
2134 /* F10h, revD and later can do x8 ECC too */
2135 if ((c
->x86
> 0x10 || c
->x86_model
> 7) && tmp
& BIT(25))
2136 pvt
->ecc_sym_sz
= 8;
2138 dump_misc_regs(pvt
);
2142 * NOTE: CPU Revision Dependent code
2145 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2146 * k8 private pointer to -->
2147 * DRAM Bank Address mapping register
2149 * DCL register where dual_channel_active is
2151 * The DBAM register consists of 4 sets of 4 bits each definitions:
2154 * 0-3 CSROWs 0 and 1
2155 * 4-7 CSROWs 2 and 3
2156 * 8-11 CSROWs 4 and 5
2157 * 12-15 CSROWs 6 and 7
2159 * Values range from: 0 to 15
2160 * The meaning of the values depends on CPU revision and dual-channel state,
2161 * see relevant BKDG more info.
2163 * The memory controller provides for total of only 8 CSROWs in its current
2164 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2165 * single channel or two (2) DIMMs in dual channel mode.
2167 * The following code logic collapses the various tables for CSROW based on CPU
2171 * The number of PAGE_SIZE pages on the specified CSROW number it
2175 static u32
amd64_csrow_nr_pages(struct amd64_pvt
*pvt
, u8 dct
, int csrow_nr
)
2177 u32 cs_mode
, nr_pages
;
2178 u32 dbam
= dct
? pvt
->dbam1
: pvt
->dbam0
;
2181 * The math on this doesn't look right on the surface because x/2*4 can
2182 * be simplified to x*2 but this expression makes use of the fact that
2183 * it is integral math where 1/2=0. This intermediate value becomes the
2184 * number of bits to shift the DBAM register to extract the proper CSROW
2187 cs_mode
= (dbam
>> ((csrow_nr
/ 2) * 4)) & 0xF;
2189 nr_pages
= pvt
->ops
->dbam_to_cs(pvt
, dct
, cs_mode
) << (20 - PAGE_SHIFT
);
2191 edac_dbg(0, " (csrow=%d) DBAM map index= %d\n", csrow_nr
, cs_mode
);
2192 edac_dbg(0, " nr_pages/channel= %u channel-count = %d\n",
2193 nr_pages
, pvt
->channel_count
);
2199 * Initialize the array of csrow attribute instances, based on the values
2200 * from pci config hardware registers.
2202 static int init_csrows(struct mem_ctl_info
*mci
)
2204 struct csrow_info
*csrow
;
2205 struct dimm_info
*dimm
;
2206 struct amd64_pvt
*pvt
= mci
->pvt_info
;
2209 int i
, j
, empty
= 1;
2210 enum mem_type mtype
;
2211 enum edac_type edac_mode
;
2214 amd64_read_pci_cfg(pvt
->F3
, NBCFG
, &val
);
2218 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2219 pvt
->mc_node_id
, val
,
2220 !!(val
& NBCFG_CHIPKILL
), !!(val
& NBCFG_ECC_ENABLE
));
2222 for_each_chip_select(i
, 0, pvt
) {
2223 csrow
= mci
->csrows
[i
];
2225 if (!csrow_enabled(i
, 0, pvt
) && !csrow_enabled(i
, 1, pvt
)) {
2226 edac_dbg(1, "----CSROW %d VALID for MC node %d\n",
2227 i
, pvt
->mc_node_id
);
2232 if (csrow_enabled(i
, 0, pvt
))
2233 nr_pages
= amd64_csrow_nr_pages(pvt
, 0, i
);
2234 if (csrow_enabled(i
, 1, pvt
))
2235 nr_pages
+= amd64_csrow_nr_pages(pvt
, 1, i
);
2237 get_cs_base_and_mask(pvt
, i
, 0, &base
, &mask
);
2238 /* 8 bytes of resolution */
2240 mtype
= amd64_determine_memory_type(pvt
, i
);
2242 edac_dbg(1, " for MC node %d csrow %d:\n", pvt
->mc_node_id
, i
);
2243 edac_dbg(1, " nr_pages: %u\n",
2244 nr_pages
* pvt
->channel_count
);
2247 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2249 if (pvt
->nbcfg
& NBCFG_ECC_ENABLE
)
2250 edac_mode
= (pvt
->nbcfg
& NBCFG_CHIPKILL
) ?
2251 EDAC_S4ECD4ED
: EDAC_SECDED
;
2253 edac_mode
= EDAC_NONE
;
2255 for (j
= 0; j
< pvt
->channel_count
; j
++) {
2256 dimm
= csrow
->channels
[j
]->dimm
;
2257 dimm
->mtype
= mtype
;
2258 dimm
->edac_mode
= edac_mode
;
2259 dimm
->nr_pages
= nr_pages
;
/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (amd_get_nb_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}
/* check MCG_CTL on all the cpus on this node */
static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & MSR_MCGCTL_NBE;

		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			 cpu, reg->q,
			 (nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}
static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, nid);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {

		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off before
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}
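
/*
 * Editor's note: the ON path above records whether the BIOS already had
 * MCGCTL[NBE] set in s->flags.nb_mce_enable; the OFF path consults that
 * snapshot so a restore only clears the bit if it was clear originally.
 */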
static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}

	amd64_read_pci_cfg(F3, NBCTL, &value);

	s->old_nbctl   = value & mask;
	s->nbctl_valid = true;

	value |= mask;
	amd64_write_pci_cfg(F3, NBCTL, value);

	amd64_read_pci_cfg(F3, NBCFG, &value);

	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	if (!(value & NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		s->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);

		amd64_read_pci_cfg(F3, NBCFG, &value);

		if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
			ret = false;
		} else {
			amd64_info("Hardware accepted DRAM ECC Enable\n");
		}
	} else {
		s->flags.nb_ecc_prev = 1;
	}

	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	return ret;
}
static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
					struct pci_dev *F3)
{
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}
/*
 * EDAC requires that the BIOS have ECC enabled before
 * taking over the processing of ECC errors. A command line
 * option allows force-enabling hardware ECC later in
 * enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";
static bool ecc_enabled(struct pci_dev *F3, u8 nid)
{
	u32 value;
	u8 ecc_en = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(F3, NBCFG, &value);

	ecc_en = !!(value & NBCFG_ECC_ENABLE);
	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
	if (!nb_mce_en)
		amd64_notice("NB MCE bank disabled, set MSR "
			     "0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, nid);

	if (!ecc_en || !nb_mce_en) {
		amd64_notice("%s", ecc_msg);
		return false;
	}
	return true;
}
static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
	int rc;

	rc = amd64_create_sysfs_dbg_files(mci);
	if (rc < 0)
		return rc;

	if (boot_cpu_data.x86 >= 0x10) {
		rc = amd64_create_sysfs_inject_files(mci);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
	amd64_remove_sysfs_dbg_files(mci);

	if (boot_cpu_data.x86 >= 0x10)
		amd64_remove_sysfs_inject_files(mci);
}
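
/*
 * Editor's note: the sysfs inject files are created only for family 0x10
 * and later, presumably because K8 lacks the NB error-injection registers
 * those files drive; the debug files are created unconditionally.
 */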
static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
				 struct amd64_family_type *fam)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= amd64_determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= fam->ctl_name;
	mci->dev_name		= pci_name(pvt->F2);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}
/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
{
	u8 fam = boot_cpu_data.x86;
	struct amd64_family_type *fam_type = NULL;

	switch (fam) {
	case 0xf:
		fam_type	= &amd64_family_types[K8_CPUS];
		pvt->ops	= &amd64_family_types[K8_CPUS].ops;
		break;

	case 0x10:
		fam_type	= &amd64_family_types[F10_CPUS];
		pvt->ops	= &amd64_family_types[F10_CPUS].ops;
		break;

	case 0x15:
		fam_type	= &amd64_family_types[F15_CPUS];
		pvt->ops	= &amd64_family_types[F15_CPUS].ops;
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	pvt->ext_model = boot_cpu_data.x86_model >> 4;

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		   (fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);

	return fam_type;
}
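
/*
 * Editor's note, a worked example (assuming K8_REV_F is 4, as in
 * amd64_edac.h): a K8 with x86_model 0x43 gives ext_model = 0x43 >> 4 = 4,
 * so the banner above reports "revF or later".
 */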
static int amd64_init_one_instance(struct pci_dev *F2)
{
	struct amd64_pvt *pvt = NULL;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	int err = 0, ret;
	u8 nid = get_node_id(F2);

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id	= nid;
	pvt->F2 = F2;

	ret = -EINVAL;
	fam_type = amd64_per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	ret = -ENODEV;
	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
	if (err)
		goto err_free;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;

	ret = -ENOMEM;
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = pvt->csels[0].b_cnt;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = pvt->channel_count;
	layers[1].is_virt_csrow = false;
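
	/*
	 * Editor's note: the two layers describe a chip-select-row x channel
	 * grid; edac_mc_alloc() below sizes its dimm_info tables from them,
	 * which is why init_csrows() can fill one dimm per csrow/channel
	 * pair.
	 */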
	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		goto err_siblings;

	mci->pvt_info = pvt;
	mci->pdev = &pvt->F2->dev;

	setup_mci_misc_attrs(mci, fam_type);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		edac_dbg(1, "failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}
	if (set_mc_sysfs_attrs(mci)) {
		edac_dbg(1, "failed set_mc_sysfs_attrs()\n");
		goto err_add_sysfs;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(amd64_decode_bus_error);

	mcis[nid] = mci;

	atomic_inc(&drv_instances);

	return 0;

err_add_sysfs:
	edac_mc_del_mc(mci->pdev);
err_add_mc:
	edac_mc_free(mci);

err_siblings:
	free_mc_sibling_devs(pvt);

err_free:
	kfree(pvt);

err_ret:
	return ret;
}
static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
					      const struct pci_device_id *mc_type)
{
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret = 0;

	ret = pci_enable_device(pdev);
	if (ret < 0) {
		edac_dbg(0, "ret=%d\n", ret);
		return -EIO;
	}

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = -ENODEV;

		if (!ecc_enable_override)
			goto err_enable;

		amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = amd64_init_one_instance(pdev);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);
		restore_ecc_error_reporting(s, nid, F3);
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;

err_out:
	return ret;
}
static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];

	mci = find_mci_by_dev(&pdev->dev);
	del_mc_sysfs_attrs(mci);
	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;
	mcis[nid] = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}
/*
 * This table is part of the interface for loading drivers for PCI devices. The
 * PCI core identifies what devices are on a system during boot, and then
 * consults this table to see whether this driver handles a given device.
 */
static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_15H_NB_F2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
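
/*
 * Editor's note: all three IDs name function 2 (the DRAM controller) of
 * the respective northbridge -- K8, F10h and F15h -- so probe runs once
 * per node, each node exposing its own F2 PCI device.
 */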
static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= amd64_probe_one_instance,
	.remove		= __devexit_p(amd64_remove_one_instance),
	.id_table	= amd64_pci_table,
};

static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (amd64_ctl_pci)
		return;

	mci = mcis[0];
	if (mci) {

		pvt = mci->pvt_info;
		amd64_ctl_pci =
			edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);

		if (!amd64_ctl_pci) {
			pr_warning("%s(): Unable to create PCI control\n",
				   __func__);

			pr_warning("%s(): PCI error report via EDAC not set\n",
				   __func__);
		}
	}
}

static int __init amd64_edac_init(void)
{
	int err = -ENODEV;

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	opstate_init();

	if (amd_cache_northbridges() < 0)
		goto err_ret;

	err = -ENOMEM;
	mcis	  = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!(mcis && ecc_stngs))
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		goto err_pci;

	err = -ENODEV;
	if (!atomic_read(&drv_instances))
		goto err_no_instances;

	setup_pci_device();
	return 0;

err_no_instances:
	pci_unregister_driver(&amd64_pci_driver);

err_pci:
	msrs_free(msrs);
	msrs = NULL;

err_free:
	kfree(mcis);
	mcis = NULL;

	kfree(ecc_stngs);
	ecc_stngs = NULL;

err_ret:
	return err;
}

static void __exit amd64_edac_exit(void)
{
	if (amd64_ctl_pci)
		edac_pci_release_generic_ctl(amd64_ctl_pci);

	pci_unregister_driver(&amd64_pci_driver);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	kfree(mcis);
	mcis = NULL;

	msrs_free(msrs);
	msrs = NULL;
}

module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");