#include "amd64_edac.h"

static struct edac_pci_ctl_info *amd64_ctl_pci;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

/* Lookup table for all possible MC control instances */
static struct mem_ctl_info *mci_lookup[MAX_NUMNODES];
static struct amd64_pvt *pvt_lookup[MAX_NUMNODES];

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, then use the last maximum value found.
 */
static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
				       u32 min_scrubrate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater or equal than the setting requested
	 * and program that. If at last entry, turn off DRAM scrubbing.
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_scrubrate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;

		/*
		 * if no suitable bandwidth found, turn off DRAM scrubbing
		 * entirely by falling back to the last element in the
		 * scrubrates array.
		 */
	}

	scrubval = scrubrates[i].scrubval;
	if (scrubval)
		edac_printk(KERN_DEBUG, EDAC_MC,
			    "Setting scrub rate bandwidth: %u\n",
			    scrubrates[i].bandwidth);
	else
		edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n");

	pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);

	return 0;
}

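/*
 * Illustrative use, assuming a hypothetical scrubrates[] entry of
 * { .scrubval = 0x08, .bandwidth = 1000000 } and a table sorted from highest
 * to lowest bandwidth: a request of new_bw = 1500000 stops at the first entry
 * whose bandwidth is <= 1500000 and programs that entry's scrubval into the
 * low five bits of F3x58; a request smaller than every listed bandwidth falls
 * through to the table's last entry, which turns DRAM scrubbing off.
 */
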
static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x0;

	switch (boot_cpu_data.x86) {
	case 0xf:
		min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
		break;

	case 0x10:
		min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
		break;

	case 0x11:
		min_scrubrate = F11_MIN_SCRUB_RATE_BITS;
		break;

	default:
		amd64_printk(KERN_ERR, "Unsupported family!\n");
		break;
	}
	return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
					   min_scrubrate);
}

static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 scrubval = 0;
	int status = -1, i, ret = 0;

	ret = pci_read_config_dword(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
	if (ret)
		debugf0("Reading K8_SCRCTRL failed\n");

	scrubval = scrubval & 0x001F;

	edac_printk(KERN_DEBUG, EDAC_MC,
		    "pci-read, sdram scrub control value: %d\n", scrubval);

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			*bw = scrubrates[i].bandwidth;
			status = 0;
			break;
		}
	}

	return status;
}

/* Map from a CSROW entry to the mask entry that operates on it */
static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
{
	return csrow >> (pvt->num_dcsm >> 3);
}

/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
{
	if (dct == 0)
		return pvt->dcsb0[csrow];
	else
		return pvt->dcsb1[csrow];
}

/*
 * Return the 'mask' address of the i'th CS entry. This function is needed
 * because the number of DCSM registers on Rev E and prior vs Rev F and later
 * is different.
 */
static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
{
	if (dct == 0)
		return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
	else
		return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
}

/*
 * In *base and *limit, pass back the full 40-bit base and limit physical
 * addresses for the node given by node_id. This information is obtained from
 * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
 * base and limit addresses are of type SysAddr, as defined at the start of
 * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
 * in the address range they represent.
 */
static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
				     u64 *base, u64 *limit)
{
	*base = pvt->dram_base[node_id];
	*limit = pvt->dram_limit[node_id];
}

/*
 * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
 * with node_id.
 */
static int amd64_base_limit_match(struct amd64_pvt *pvt,
				  u64 sys_addr, int node_id)
{
	u64 base, limit, addr;

	amd64_get_base_and_limit(pvt, node_id, &base, &limit);

	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return (addr >= base) && (addr <= limit);
}

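/*
 * Worked example of the truncation above: a sign-extended sys_addr of
 * 0xffffff8000000000 becomes 0x8000000000 after masking with
 * 0x000000ffffffffff, so only the low 40 bits take part in the base/limit
 * comparison.
 */
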
/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	int node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = pvt->dram_IntlvEn[0];

	if (intlv_en == 0) {
		for (node_id = 0; ; ) {
			if (amd64_base_limit_match(pvt, sys_addr, node_id))
				break;

			if (++node_id >= DRAM_REG_COUNT)
				goto err_no_match;
		}
		goto found;
	}

	if (unlikely((intlv_en != (0x01 << 8)) &&
		     (intlv_en != (0x03 << 8)) &&
		     (intlv_en != (0x07 << 8)))) {
		amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
			     "IntlvEn field of DRAM Base Register for node 0: "
			     "This probably indicates a BIOS bug.\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((pvt->dram_limit[node_id] & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_REG_COUNT)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
		amd64_printk(KERN_WARNING,
			     "%s(): sys_addr 0x%lx falls outside base/limit "
			     "address range for node %d with node interleaving "
			     "enabled.\n", __func__, (unsigned long)sys_addr,
			     node_id);
		return NULL;
	}

found:
	return edac_mc_find(node_id);

err_no_match:
	debugf2("sys_addr 0x%lx doesn't match any node\n",
		(unsigned long)sys_addr);

	return NULL;
}

/*
 * Extract the DRAM CS base address from selected csrow register.
 */
static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
{
	return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
		pvt->dcs_shift;
}

/*
 * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
 */
static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
{
	u64 dcsm_bits, other_bits;
	u64 mask;

	/* Extract bits from DRAM CS Mask. */
	dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;

	other_bits = pvt->dcsm_mask;
	other_bits = ~(other_bits << pvt->dcs_shift);

	/*
	 * The extracted bits from DCSM belong in the spaces represented by
	 * the cleared bits in other_bits.
	 */
	mask = (dcsm_bits << pvt->dcs_shift) | other_bits;

	return mask;
}

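/*
 * Illustrative example with hypothetical field values: if dcsm_mask is
 * 0x000000ff and dcs_shift is 8, the extracted DCSM bits end up in bits
 * [15:8] of the returned mask and every bit position outside that field is
 * returned as 1.
 */
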
/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	/*
	 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
	 * base/mask register pair, test the condition shown near the start of
	 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
	 */
	for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) {

		/* This DRAM chip select is disabled on this node */
		if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
			continue;

		base = base_from_dct_base(pvt, csrow);
		mask = ~mask_from_dct_mask(pvt, csrow);

		if ((input_addr & mask) == (base & mask)) {
			debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
				(unsigned long)input_addr, csrow,
				pvt->mc_node_id);

			return csrow;
		}
	}

	debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		(unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}

/*
 * Return the base value defined by the DRAM Base register for the node
 * represented by mci. This function returns the full 40-bit value despite the
 * fact that the register only stores bits 39-24 of the value. See section
 * 3.4.4.1 (BKDG #26094, K8, revA-E).
 */
static inline u64 get_dram_base(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	return pvt->dram_base[pvt->mc_node_id];
}

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 base;

	/* only revE and later have the DRAM Hole Address Register */
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_E) {
		debugf1("  revision %d for node %d does not support DHAR\n",
			pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* only valid for Fam10h */
	if (boot_cpu_data.x86 == 0x10 &&
	    (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
		debugf1("  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if ((pvt->dhar & DHAR_VALID) == 0) {
		debugf1("  Dram Memory Hoisting is DISABLED on this node %d\n",
			pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	base = dhar_base(pvt->dhar);

	*hole_base = base;
	*hole_size = (0x1ull << 32) - base;

	if (boot_cpu_data.x86 > 0xf)
		*hole_offset = f10_dhar_offset(pvt->dhar);
	else
		*hole_offset = k8_dhar_offset(pvt->dhar);

	debugf1("  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		pvt->mc_node_id, (unsigned long)*hole_base,
		(unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);

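/*
 * Numerical example (illustrative): if dhar_base() reports a hole base of
 * 0xc0000000, then *hole_base = 0xc0000000 and
 * *hole_size = 0x100000000 - 0xc0000000 = 0x40000000, i.e. the hole covers
 * [0xc0000000, 0xffffffff] and an equal amount of DRAM is hoisted above the
 * 4GB boundary.
 */
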
/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the Sysaddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret = 0;

	dram_base = get_dram_base(mci);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ull << 32)) &&
		    (sys_addr < ((1ull << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			debugf2("using DHAR to translate SysAddr 0x%lx to "
				"DramAddr 0x%lx\n",
				(unsigned long)sys_addr,
				(unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & 0xffffffffffull) - dram_base;

	debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
		"DramAddr 0x%lx\n", (unsigned long)sys_addr,
		(unsigned long)dram_addr);

	return dram_addr;
}

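/*
 * Illustrative example continuing the DHAR case above, with hypothetical
 * values hole_size = 0x40000000 and hole_offset = 0x40000000: a sys_addr of
 * 0x100000000 lies within the relocated range [0x100000000, 0x140000000) and
 * translates to dram_addr = 0x100000000 - 0x40000000 = 0xc0000000, i.e. back
 * to the start of the DRAM hole.
 */
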
/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}

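/*
 * Examples straight from the table above: intlv_en == 1 (2-node interleaving)
 * uses 1 SysAddr bit, intlv_en == 3 uses 2 bits, and intlv_en == 7 (8-node
 * interleaving) uses 3 bits; every other encoding maps to 0 bits.
 */
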
/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
	input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
		     (dram_addr & 0xfff);

	debugf2("  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		intlv_shift, (unsigned long)dram_addr,
		(unsigned long)input_addr);

	return input_addr;
}

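/*
 * Worked example: with intlv_shift == 1 and dram_addr == 0x12345678,
 * (0x12345678 >> 1) & 0xffffff000 == 0x91a2000, the page offset 0x678 is
 * kept as-is, and input_addr == 0x91a2678.
 */
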
/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	debugf2("SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		(unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}

/*
 * @input_addr is an InputAddr associated with the node represented by mci.
 * Translate @input_addr to a DramAddr and return the result.
 */
static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int node_id, intlv_shift;
	u64 bits, dram_addr;
	u32 intlv_sel;

	/*
	 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * shows how to translate a DramAddr to an InputAddr. Here we reverse
	 * this procedure. When translating from a DramAddr to an InputAddr, the
	 * bits used for node interleaving are discarded. Here we recover these
	 * bits from the IntlvSel field of the DRAM Limit register (section
	 * 3.4.4.2) for the node that input_addr is associated with.
	 */
	pvt = mci->pvt_info;
	node_id = pvt->mc_node_id;
	BUG_ON((node_id < 0) || (node_id > 7));

	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);

	if (intlv_shift == 0) {
		debugf1("  InputAddr 0x%lx translates to DramAddr of "
			"same value\n", (unsigned long)input_addr);

		return input_addr;
	}

	bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
	       (input_addr & 0xfff);

	intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
	dram_addr = bits + (intlv_sel << 12);

	debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
		"(%d node interleave bits)\n", (unsigned long)input_addr,
		(unsigned long)dram_addr, intlv_shift);

	return dram_addr;
}

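/*
 * Worked example reversing the one above: with intlv_shift == 1,
 * intlv_sel == 1 and input_addr == 0x91a2678, bits ==
 * (0x91a2000 << 1) + 0x678 == 0x12344678 and dram_addr ==
 * 0x12344678 + (1 << 12) == 0x12345678, recovering the original DramAddr
 * including its interleave bit.
 */
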
/*
 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
 * @dram_addr to a SysAddr.
 */
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
	int ret = 0;

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((dram_addr >= hole_base) &&
		    (dram_addr < (hole_base + hole_size))) {
			sys_addr = dram_addr + hole_offset;

			debugf1("using DHAR to translate DramAddr 0x%lx to "
				"SysAddr 0x%lx\n", (unsigned long)dram_addr,
				(unsigned long)sys_addr);

			return sys_addr;
		}
	}

	amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
	sys_addr = dram_addr + base;

	/*
	 * The sys_addr we have computed up to this point is a 40-bit value
	 * because the k8 deals with 40-bit values. However, the value we are
	 * supposed to return is a full 64-bit physical address. The AMD
	 * x86-64 architecture specifies that the most significant implemented
	 * address bit through bit 63 of a physical address must be either all
	 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
	 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
	 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
	 * Programming.
	 */
	sys_addr |= ~((sys_addr & (1ull << 39)) - 1);

	debugf1("  Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
		pvt->mc_node_id, (unsigned long)dram_addr,
		(unsigned long)sys_addr);

	return sys_addr;
}

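/*
 * Sign-extension example: if the computed 40-bit sys_addr is 0x8012345678
 * (bit 39 set), the expression above ORs in 0xffffff8000000000 and
 * 0xffffff8012345678 is returned; if bit 39 is clear, the OR term is 0 and
 * the address is returned unchanged.
 */
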
/*
 * @input_addr is an InputAddr associated with the node given by mci. Translate
 * @input_addr to a SysAddr.
 */
static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
					 u64 input_addr)
{
	return dram_addr_to_sys_addr(mci,
				     input_addr_to_dram_addr(mci, input_addr));
}

/*
 * Find the minimum and maximum InputAddr values that map to the given @csrow.
 * Pass back these values in *input_addr_min and *input_addr_max.
 */
static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
			      u64 *input_addr_min, u64 *input_addr_max)
{
	struct amd64_pvt *pvt;
	u64 base, mask;

	pvt = mci->pvt_info;
	BUG_ON((csrow < 0) || (csrow >= CHIPSELECT_COUNT));

	base = base_from_dct_base(pvt, csrow);
	mask = mask_from_dct_mask(pvt, csrow);

	*input_addr_min = base & ~mask;
	*input_addr_max = base | mask | pvt->dcs_mask_notused;
}

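/*
 * Illustrative example with hypothetical register values: for
 * base == 0x100000000, mask == 0x0fffe000 and dcs_mask_notused == 0x1fff,
 * the csrow claims the InputAddr range [0x100000000, 0x10fffffff]; the
 * minimum clears every "don't care" bit and the maximum sets them all.
 */
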
/*
 * Extract error address from MCA NB Address Low (section 3.6.4.5) and MCA NB
 * Address High (section 3.6.4.6) register values and return the result. The
 * address is located in the info structure (nbeah and nbeal), the encoding is
 * device specific.
 */
static u64 extract_error_address(struct mem_ctl_info *mci,
				 struct amd64_error_info_regs *info)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	return pvt->ops->get_error_address(mci, info);
}


/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    u32 *page, u32 *offset)
{
	*page = (u32) (error_address >> PAGE_SHIFT);
	*offset = ((u32) error_address) & ~PAGE_MASK;
}

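/*
 * Example with 4KB pages (PAGE_SHIFT == 12): an error_address of 0x12345678
 * yields page 0x12345 and offset 0x678.
 */
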
/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_printk(mci, KERN_ERR,
				"Failed to translate InputAddr to csrow for "
				"address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}

static int get_channel_from_ecc_syndrome(unsigned short syndrome);

static void amd64_cpu_display_info(struct amd64_pvt *pvt)
{
	if (boot_cpu_data.x86 == 0x11)
		edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n");
	else if (boot_cpu_data.x86 == 0x10)
		edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
	else if (boot_cpu_data.x86 == 0xf)
		edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
			(pvt->ext_model >= OPTERON_CPU_REV_F) ?
			"Rev F or later" : "Rev E or earlier");
	else
		/* we'll hardly ever get here */
		edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n");
}

/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
	int bit;
	enum dev_type edac_cap = EDAC_FLAG_NONE;

	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
		? 19
		: 17;

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}

static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
					 int ganged);

/* Display and decode various NB registers for debug purposes. */
static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
{
	int ganged;

	debugf1("  nbcap:0x%8.08x DctDualCap=%s DualNode=%s 8-Node=%s\n",
		pvt->nbcap,
		(pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "True" : "False",
		(pvt->nbcap & K8_NBCAP_DUAL_NODE) ? "True" : "False",
		(pvt->nbcap & K8_NBCAP_8_NODE) ? "True" : "False");
	debugf1("  ECC Capable=%s ChipKill Capable=%s\n",
		(pvt->nbcap & K8_NBCAP_SECDED) ? "True" : "False",
		(pvt->nbcap & K8_NBCAP_CHIPKILL) ? "True" : "False");
	debugf1("  DramCfg0-low=0x%08x DIMM-ECC=%s Parity=%s Width=%s\n",
		pvt->dclr0,
		(pvt->dclr0 & BIT(19)) ? "Enabled" : "Disabled",
		(pvt->dclr0 & BIT(8)) ? "Enabled" : "Disabled",
		(pvt->dclr0 & BIT(11)) ? "128b" : "64b");
	debugf1("  DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s DIMM Type=%s\n",
		(pvt->dclr0 & BIT(12)) ? "Y" : "N",
		(pvt->dclr0 & BIT(13)) ? "Y" : "N",
		(pvt->dclr0 & BIT(14)) ? "Y" : "N",
		(pvt->dclr0 & BIT(15)) ? "Y" : "N",
		(pvt->dclr0 & BIT(16)) ? "UN-Buffered" : "Buffered");

	debugf1("  online-spare: 0x%8.08x\n", pvt->online_spare);

	if (boot_cpu_data.x86 == 0xf) {
		debugf1("  dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
			pvt->dhar, dhar_base(pvt->dhar),
			k8_dhar_offset(pvt->dhar));
		debugf1("  DramHoleValid=%s\n",
			(pvt->dhar & DHAR_VALID) ? "True" : "False");

		debugf1("  dbam-dkt: 0x%8.08x\n", pvt->dbam0);

		/* everything below this point is Fam10h and above */
		return;
	} else {
		debugf1("  dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
			pvt->dhar, dhar_base(pvt->dhar),
			f10_dhar_offset(pvt->dhar));
		debugf1("  DramMemHoistValid=%s DramHoleValid=%s\n",
			(pvt->dhar & F10_DRAM_MEM_HOIST_VALID) ?
				"True" : "False",
			(pvt->dhar & DHAR_VALID) ?
				"True" : "False");
	}

	/* Only if NOT ganged does dcl1 have valid info */
	if (!dct_ganging_enabled(pvt)) {
		debugf1("  DramCfg1-low=0x%08x DIMM-ECC=%s Parity=%s "
			"Width=%s\n", pvt->dclr1,
			(pvt->dclr1 & BIT(19)) ? "Enabled" : "Disabled",
			(pvt->dclr1 & BIT(8)) ? "Enabled" : "Disabled",
			(pvt->dclr1 & BIT(11)) ? "128b" : "64b");
		debugf1("  DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s "
			"DIMM Type=%s\n",
			(pvt->dclr1 & BIT(12)) ? "Y" : "N",
			(pvt->dclr1 & BIT(13)) ? "Y" : "N",
			(pvt->dclr1 & BIT(14)) ? "Y" : "N",
			(pvt->dclr1 & BIT(15)) ? "Y" : "N",
			(pvt->dclr1 & BIT(16)) ? "UN-Buffered" : "Buffered");
	}

	/*
	 * Determine if ganged and then dump memory sizes for first controller,
	 * and if NOT ganged dump info for 2nd controller.
	 */
	ganged = dct_ganging_enabled(pvt);

	f10_debug_display_dimm_sizes(0, pvt, ganged);

	if (!ganged)
		f10_debug_display_dimm_sizes(1, pvt, ganged);
}

/* Read in both of DBAM registers */
static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
{
	int err = 0;
	unsigned int reg;

	reg = DBAM0;
	err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam0);
	if (err)
		goto err_reg;

	if (boot_cpu_data.x86 >= 0x10) {
		reg = DBAM1;
		err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam1);

		if (err)
			goto err_reg;
	}

	return;

err_reg:
	debugf0("Error reading F2x%03x.\n", reg);
}

/*
 * NOTE: CPU Revision Dependent code: Rev E and Rev F
 *
 * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
 * set the shift factor for the DCSB and DCSM values.
 *
 * ->dcs_mask_notused, RevE:
 *
 * To find the max InputAddr for the csrow, start with the base address and set
 * all bits that are "don't care" bits in the test at the start of section
 * 3.5.4 (p. 84).
 *
 * The "don't care" bits are all set bits in the mask and all bits in the gaps
 * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
 * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
 * gaps.
 *
 * ->dcs_mask_notused, RevF and later:
 *
 * To find the max InputAddr for the csrow, start with the base address and set
 * all bits that are "don't care" bits in the test at the start of the
 * equivalent NPT section.
 *
 * The "don't care" bits are all set bits in the mask and all bits in the gaps
 * between bit ranges [36:27] and [21:13].
 *
 * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
 * which are all bits in the above-mentioned gaps.
 */
static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
{
	if (pvt->ext_model >= OPTERON_CPU_REV_F) {
		pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
		pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
		pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
		pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;

		switch (boot_cpu_data.x86) {
		case 0xf:
			pvt->num_dcsm = REV_F_DCSM_COUNT;
			break;

		case 0x10:
			pvt->num_dcsm = F10_DCSM_COUNT;
			break;

		case 0x11:
			pvt->num_dcsm = F11_DCSM_COUNT;
			break;

		default:
			amd64_printk(KERN_ERR, "Unsupported family!\n");
			break;
		}
	} else {
		pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
		pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
		pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
		pvt->dcs_shift = REV_E_DCS_SHIFT;
		pvt->num_dcsm = REV_E_DCSM_COUNT;
	}
}

/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
 */
static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs, reg, err = 0;

	amd64_set_dct_base_and_mask(pvt);

	for (cs = 0; cs < CHIPSELECT_COUNT; cs++) {
		reg = K8_DCSB0 + (cs * 4);
		err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
					    &pvt->dcsb0[cs]);
		if (err)
			debugf0("Reading K8_DCSB0[%d] failed\n", cs);
		else
			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
				cs, pvt->dcsb0[cs], reg);

		/* If DCT are NOT ganged, then read in DCT1's base */
		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
			reg = F10_DCSB1 + (cs * 4);
			err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
						    &pvt->dcsb1[cs]);
			if (err)
				debugf0("Reading F10_DCSB1[%d] failed\n", cs);
			else
				debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
					cs, pvt->dcsb1[cs], reg);
		}
	}

	for (cs = 0; cs < pvt->num_dcsm; cs++) {
		reg = K8_DCSM0 + (cs * 4);
		err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
					    &pvt->dcsm0[cs]);
		if (err)
			debugf0("Reading K8_DCSM0 failed\n");
		else
			debugf0("  DCSM0[%d]=0x%08x reg: F2x%x\n",
				cs, pvt->dcsm0[cs], reg);

		/* If DCT are NOT ganged, then read in DCT1's mask */
		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
			reg = F10_DCSM1 + (cs * 4);
			err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
						    &pvt->dcsm1[cs]);
			if (err)
				debugf0("Reading F10_DCSM1[%d] failed\n", cs);
			else
				debugf0("  DCSM1[%d]=0x%08x reg: F2x%x\n",
					cs, pvt->dcsm1[cs], reg);
		}
	}
}

static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
{
	enum mem_type type;

	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= OPTERON_CPU_REV_F) {
		/* Rev F and later */
		type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
	} else {
		/* Rev E and earlier */
		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
	}

	debugf1("  Memory type is: %s\n",
		(type == MEM_DDR2) ? "MEM_DDR2" :
		(type == MEM_RDDR2) ? "MEM_RDDR2" :
		(type == MEM_DDR) ? "MEM_DDR" : "MEM_RDDR");