#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *amd64_ctl_pci;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/* Per-node driver instances */
static struct mem_ctl_info **mcis;
static struct amd64_pvt **pvts;
static struct ecc_settings **ecc_stngs;
/*
 * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
 * later.
 */
static int ddr2_dbam_revCG[] = {
			   [0]		= 32,
			   [1]		= 64,
			   [2]		= 128,
			   [3]		= 256,
			   [4]		= 512,
			   [5]		= 1024,
			   [6]		= 2048,
};

static int ddr2_dbam_revD[] = {
			   [0]		= 32,
			   [1]		= 64,
			   [2 ... 3]	= 128,
			   [4]		= 256,
			   [5]		= 512,
			   [6]		= 256,
			   [7]		= 512,
			   [8 ... 9]	= 1024,
			   [10]		= 2048,
};

static int ddr2_dbam[] = { [0] = 128,
			   [1] = 256,
			   [2 ... 4] = 512,
			   [5 ... 6] = 1024,
			   [7 ... 8] = 2048,
			   [9 ... 10] = 4096,
			   [11] = 8192,
};

static int ddr3_dbam[] = { [0] = -1,
			   [1] = 256,
			   [2] = 512,
			   [3 ... 4] = -1,
			   [5 ... 6] = 1024,
			   [7 ... 8] = 2048,
			   [9 ... 10] = 4096,
			   [11] = 8192,
};
/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
struct scrubrate scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x02, 800000000UL},
	{ 0x03, 400000000UL},
	{ 0x04, 200000000UL},
	{ 0x05, 100000000UL},
	{ 0x06, 50000000UL},
	{ 0x07, 25000000UL},
	{ 0x08, 12284069UL},
	{ 0x09, 6274509UL},
	{ 0x0A, 3121951UL},
	{ 0x0B, 1560975UL},
	{ 0x0C, 781440UL},
	{ 0x0D, 390720UL},
	{ 0x0E, 195300UL},
	{ 0x0F, 97650UL},
	{ 0x10, 48854UL},
	{ 0x11, 24427UL},
	{ 0x12, 12213UL},
	{ 0x13, 6101UL},
	{ 0x14, 3051UL},
	{ 0x15, 1523UL},
	{ 0x16, 761UL},
	{ 0x00, 0UL},		/* scrubbing off */
};
/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (DRAM) to cache lines. This is nasty, so we use bandwidth in bytes/sec for
 * the setting.
 *
 * Currently, we only do DRAM scrubbing. If the scrubbing is done in software
 * on other archs, we might not have access to the caches directly.
 */
/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, use the last maximum value found.
 */
static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * Map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater than or equal to the setting
	 * requested and program that. If at last entry, turn off DRAM
	 * scrubbing.
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		/*
		 * Skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;

		/*
		 * If no suitable bandwidth is found, turn off DRAM scrubbing
		 * entirely by falling back to the last element in the
		 * scrubrates array.
		 */
	}

	scrubval = scrubrates[i].scrubval;
	if (scrubval)
		amd64_info("Setting scrub rate bandwidth: %u\n",
			   scrubrates[i].bandwidth);
	else
		amd64_info("Turning scrubbing off.\n");

	pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);

	return 0;
}
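/*
 * Illustrative walk through the table above (an added note, not from the
 * original driver): the bandwidths are sorted in descending order, so a
 * request of 1600000000 or more stops at the very first entry and programs
 * scrubval 0x01, while a request smaller than every listed rate walks past
 * all recommended entries and lands on the terminating { 0x00, 0UL }
 * element, which switches DRAM scrubbing off.
 */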
static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
}
static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 scrubval = 0;
	int status = -1, i;

	amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);

	scrubval = scrubval & 0x001F;

	amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval);

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			*bw = scrubrates[i].bandwidth;
			status = 0;
			break;
		}
	}

	return status;
}
/* Map from a CSROW entry to the mask entry that operates on it */
static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
{
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
		return csrow;
	else
		return csrow >> 1;
}
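/*
 * Example (illustrative): on Rev F and later, two chip selects share one
 * mask register, so csrows 2 and 3 both map to DCSM entry 1 via the
 * csrow >> 1 above; on Rev E and earlier the mapping is 1:1.
 */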
/* Return the 'base' address of the i'th CS entry of the 'dct' DRAM controller */
static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
{
	if (dct == 0)
		return pvt->dcsb0[csrow];
	else
		return pvt->dcsb1[csrow];
}

/*
 * Return the 'mask' address of the i'th CS entry. This function is needed
 * because the number of DCSM registers on Rev E and prior differs from Rev F
 * and later.
 */
static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
{
	if (dct == 0)
		return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
	else
		return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
}
/*
 * In *base and *limit, pass back the full 40-bit base and limit physical
 * addresses for the node given by node_id. This information is obtained from
 * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
 * base and limit addresses are of type SysAddr, as defined at the start of
 * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
 * in the address range they represent.
 */
static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
				     u64 *base, u64 *limit)
{
	*base = pvt->dram_base[node_id];
	*limit = pvt->dram_limit[node_id];
}
/*
 * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
 * with node_id.
 */
static int amd64_base_limit_match(struct amd64_pvt *pvt,
				  u64 sys_addr, int node_id)
{
	u64 base, limit, addr;

	amd64_get_base_and_limit(pvt, node_id, &base, &limit);

	/*
	 * The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return (addr >= base) && (addr <= limit);
}
/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	int node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = pvt->dram_IntlvEn[0];

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
			if (amd64_base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_REG_COUNT)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find(node_id);

err_no_match:
	debugf2("sys_addr 0x%lx doesn't match any node\n",
		(unsigned long)sys_addr);

	return NULL;
}
/*
 * Extract the DRAM CS base address from the selected csrow register.
 */
static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
{
	return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
				pvt->dcs_shift;
}

/*
 * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
 */
static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
{
	u64 dcsm_bits, other_bits;
	u64 mask;

	/* Extract bits from DRAM CS Mask. */
	dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;

	other_bits = pvt->dcsm_mask;
	other_bits = ~(other_bits << pvt->dcs_shift);

	/*
	 * The extracted bits from DCSM belong in the spaces represented by
	 * the cleared bits in other_bits.
	 */
	mask = (dcsm_bits << pvt->dcs_shift) | other_bits;

	return mask;
}
/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	/*
	 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
	 * base/mask register pair, test the condition shown near the start of
	 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
	 */
	for (csrow = 0; csrow < pvt->cs_count; csrow++) {

		/* This DRAM chip select is disabled on this node */
		if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
			continue;

		base = base_from_dct_base(pvt, csrow);
		mask = ~mask_from_dct_mask(pvt, csrow);

		if ((input_addr & mask) == (base & mask)) {
			debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
				(unsigned long)input_addr, csrow,
				pvt->mc_node_id);

			return csrow;
		}
	}
	debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		(unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}
/*
 * Return the base value defined by the DRAM Base register for the node
 * represented by mci. This function returns the full 40-bit value despite the
 * fact that the register only stores bits 39-24 of the value. See section
 * 3.4.4.1 (BKDG #26094, K8, revA-E).
 */
static inline u64 get_dram_base(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	return pvt->dram_base[pvt->mc_node_id];
}
/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 base;

	/* only revE and later have the DRAM Hole Address Register */
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
		debugf1("  revision %d for node %d does not support DHAR\n",
			pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* only valid for Fam10h */
	if (boot_cpu_data.x86 == 0x10 &&
	    (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
		debugf1("  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if ((pvt->dhar & DHAR_VALID) == 0) {
		debugf1("  Dram Memory Hoisting is DISABLED on this node %d\n",
			pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	base = dhar_base(pvt->dhar);

	*hole_base = base;
	*hole_size = (0x1ull << 32) - base;

	if (boot_cpu_data.x86 > 0xf)
		*hole_offset = f10_dhar_offset(pvt->dhar);
	else
		*hole_offset = k8_dhar_offset(pvt->dhar);

	debugf1("  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		pvt->mc_node_id, (unsigned long)*hole_base,
		(unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
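/*
 * Worked example (illustrative, not from the original source): with a hole
 * base of 0xc0000000, the hole spans [0xc0000000, 0xffffffff] and
 * *hole_size = (1ull << 32) - 0xc0000000 = 0x40000000 (1GB). The DRAM
 * behind the hole reappears at SysAddr 0x100000000 and up; translating such
 * a SysAddr back to a DramAddr subtracts *hole_offset from it (see
 * sys_addr_to_dram_addr() below).
 */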
/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the SysAddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret = 0;

	dram_base = get_dram_base(mci);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ull << 32)) &&
		    (sys_addr < ((1ull << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			debugf2("using DHAR to translate SysAddr 0x%lx to "
				"DramAddr 0x%lx\n",
				(unsigned long)sys_addr,
				(unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & 0xffffffffffull) - dram_base;

	debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
		"DramAddr 0x%lx\n", (unsigned long)sys_addr,
		(unsigned long)dram_addr);
	return dram_addr;
}
/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
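/*
 * Example (illustrative): per the table above, intlv_en == 0x1 means
 * 2-node interleaving (1 SysAddr bit), 0x3 means 4 nodes (2 bits) and
 * 0x7 means 8 nodes (3 bits); every other encoding yields 0 bits.
 */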
/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
	input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
		     (dram_addr & 0xfff);

	debugf2("  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		intlv_shift, (unsigned long)dram_addr,
		(unsigned long)input_addr);

	return input_addr;
}
/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	debugf2("SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		(unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}
/*
 * @input_addr is an InputAddr associated with the node represented by mci.
 * Translate @input_addr to a DramAddr and return the result.
 */
static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int node_id, intlv_shift;
	u64 bits, dram_addr;
	u32 intlv_sel;

	/*
	 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * shows how to translate a DramAddr to an InputAddr. Here we reverse
	 * this procedure. When translating from a DramAddr to an InputAddr,
	 * the bits used for node interleaving are discarded. Here we recover
	 * these bits from the IntlvSel field of the DRAM Limit register
	 * (section 3.4.4.2) for the node that input_addr is associated with.
	 */
	pvt = mci->pvt_info;
	node_id = pvt->mc_node_id;
	BUG_ON((node_id < 0) || (node_id > 7));

	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);

	if (intlv_shift == 0) {
		debugf1("    InputAddr 0x%lx translates to DramAddr of "
			"same value\n",	(unsigned long)input_addr);

		return input_addr;
	}

	bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
	       (input_addr & 0xfff);

	intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
	dram_addr = bits + (intlv_sel << 12);

	debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
		"(%d node interleave bits)\n", (unsigned long)input_addr,
		(unsigned long)dram_addr, intlv_shift);

	return dram_addr;
}
/*
 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
 * @dram_addr to a SysAddr.
 */
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
	int ret = 0;

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((dram_addr >= hole_base) &&
		    (dram_addr < (hole_base + hole_size))) {
			sys_addr = dram_addr + hole_offset;

			debugf1("using DHAR to translate DramAddr 0x%lx to "
				"SysAddr 0x%lx\n", (unsigned long)dram_addr,
				(unsigned long)sys_addr);

			return sys_addr;
		}
	}

	amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
	sys_addr = dram_addr + base;

	/*
	 * The sys_addr we have computed up to this point is a 40-bit value
	 * because the k8 deals with 40-bit values. However, the value we are
	 * supposed to return is a full 64-bit physical address. The AMD
	 * x86-64 architecture specifies that the most significant implemented
	 * address bit through bit 63 of a physical address must be either all
	 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
	 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
	 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
	 * Programming.
	 */
	sys_addr |= ~((sys_addr & (1ull << 39)) - 1);

	debugf1("  Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
		pvt->mc_node_id, (unsigned long)dram_addr,
		(unsigned long)sys_addr);

	return sys_addr;
}
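/*
 * Worked example of the sign extension above (illustrative): if bit 39 of
 * sys_addr is set, (sys_addr & (1ull << 39)) - 1 equals 0x0000007fffffffff,
 * whose complement sets bits 63-39 of sys_addr; if bit 39 is clear, the
 * subtraction yields all ones, whose complement is 0, and sys_addr is left
 * unchanged.
 */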
/*
 * @input_addr is an InputAddr associated with the node given by mci. Translate
 * @input_addr to a SysAddr.
 */
static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
					 u64 input_addr)
{
	return dram_addr_to_sys_addr(mci,
				     input_addr_to_dram_addr(mci, input_addr));
}
/*
 * Find the minimum and maximum InputAddr values that map to the given @csrow.
 * Pass back these values in *input_addr_min and *input_addr_max.
 */
static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
			      u64 *input_addr_min, u64 *input_addr_max)
{
	struct amd64_pvt *pvt;
	u64 base, mask;

	pvt = mci->pvt_info;
	BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));

	base = base_from_dct_base(pvt, csrow);
	mask = mask_from_dct_mask(pvt, csrow);

	*input_addr_min = base & ~mask;
	*input_addr_max = base | mask | pvt->dcs_mask_notused;
}
/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    u32 *page, u32 *offset)
{
	*page = (u32) (error_address >> PAGE_SHIFT);
	*offset = ((u32) error_address) & ~PAGE_MASK;
}
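/*
 * Example (illustrative): with 4K pages (PAGE_SHIFT == 12), an
 * error_address of 0x12345678 maps to page 0x12345 and offset 0x678.
 */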
/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

static u16 extract_syndrome(struct err_regs *err)
{
	return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
}
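/*
 * Illustrative note: the low syndrome byte lives in NBSH[22:15] and the
 * high byte in NBSL[31:24], so the two shift-and-mask expressions above
 * simply reassemble the 16-bit syndrome from the two MCA NB registers.
 */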
/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the
 * DIMMs are ECC capable.
 */
static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
	int bit;
	enum edac_type edac_cap = EDAC_FLAG_NONE;

	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
		? 19
		: 17;

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}
static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);

static void amd64_dump_dramcfg_low(u32 dclr, int chan)
{
	debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	debugf1("  DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
		(dclr & BIT(16)) ?  "un" : "",
		(dclr & BIT(19)) ? "yes" : "no");

	debugf1("  PAR/ERR parity: %s\n",
		(dclr & BIT(8)) ?  "enabled" : "disabled");

	debugf1("  DCT 128bit mode width: %s\n",
		(dclr & BIT(11)) ?  "128b" : "64b");

	debugf1("  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		(dclr & BIT(12)) ?  "yes" : "no",
		(dclr & BIT(13)) ?  "yes" : "no",
		(dclr & BIT(14)) ?  "yes" : "no",
		(dclr & BIT(15)) ?  "yes" : "no");
}
/* Display and decode various NB registers for debug purposes. */
static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
{
	int ganged;

	debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	debugf1("  NB two channel DRAM capable: %s\n",
		(pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");

	debugf1("  ECC capable: %s, ChipKill ECC capable: %s\n",
		(pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
		(pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");

	amd64_dump_dramcfg_low(pvt->dclr0, 0);

	debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
		"offset: 0x%08x\n",
		pvt->dhar,
		dhar_base(pvt->dhar),
		(boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
					   : f10_dhar_offset(pvt->dhar));

	debugf1("  DramHoleValid: %s\n",
		(pvt->dhar & DHAR_VALID) ? "yes" : "no");

	/* everything below this point is Fam10h and above */
	if (boot_cpu_data.x86 == 0xf) {
		amd64_debug_display_dimm_sizes(0, pvt);
		return;
	}

	amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		amd64_dump_dramcfg_low(pvt->dclr1, 1);

	/*
	 * Determine if ganged and then dump memory sizes for first controller,
	 * and if NOT ganged dump info for 2nd controller.
	 */
	ganged = dct_ganging_enabled(pvt);

	amd64_debug_display_dimm_sizes(0, pvt);

	if (!ganged)
		amd64_debug_display_dimm_sizes(1, pvt);
}
/* Read in both of DBAM registers */
static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
{
	amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0);

	if (boot_cpu_data.x86 >= 0x10)
		amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1);
}
/*
 * NOTE: CPU Revision Dependent code: Rev E and Rev F
 *
 * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
 * set the shift factor for the DCSB and DCSM values.
 *
 * ->dcs_mask_notused, RevE:
 *
 * To find the max InputAddr for the csrow, start with the base address and set
 * all bits that are "don't care" bits in the test at the start of section
 * 3.5.4 (p. 84).
 *
 * The "don't care" bits are all set bits in the mask and all bits in the gaps
 * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
 * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
 * gaps.
 *
 * ->dcs_mask_notused, RevF and later:
 *
 * To find the max InputAddr for the csrow, start with the base address and set
 * all bits that are "don't care" bits in the test at the start of NPT section
 * 4.5.4 (p. 87).
 *
 * The "don't care" bits are all set bits in the mask and all bits in the gaps
 * between bit ranges [36:27] and [21:13].
 *
 * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
 * which are all bits in the above-mentioned gaps.
 */
static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
{

	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->dcsb_base		= REV_E_DCSB_BASE_BITS;
		pvt->dcsm_mask		= REV_E_DCSM_MASK_BITS;
		pvt->dcs_mask_notused	= REV_E_DCS_NOTUSED_BITS;
		pvt->dcs_shift		= REV_E_DCS_SHIFT;
		pvt->cs_count		= 8;
		pvt->num_dcsm		= 8;
	} else {
		pvt->dcsb_base		= REV_F_F1Xh_DCSB_BASE_BITS;
		pvt->dcsm_mask		= REV_F_F1Xh_DCSM_MASK_BITS;
		pvt->dcs_mask_notused	= REV_F_F1Xh_DCS_NOTUSED_BITS;
		pvt->dcs_shift		= REV_F_F1Xh_DCS_SHIFT;

		if (boot_cpu_data.x86 == 0x11) {
			pvt->cs_count = 4;
			pvt->num_dcsm = 2;
		} else {
			pvt->cs_count = 8;
			pvt->num_dcsm = 4;
		}
	}
}
/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
 */
static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs, reg;

	amd64_set_dct_base_and_mask(pvt);

	for (cs = 0; cs < pvt->cs_count; cs++) {
		reg = K8_DCSB0 + (cs * 4);
		if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs]))
			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
				cs, pvt->dcsb0[cs], reg);

		/* If DCT are NOT ganged, then read in DCT1's base */
		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
			reg = F10_DCSB1 + (cs * 4);
			if (!amd64_read_pci_cfg(pvt->F2, reg,
						&pvt->dcsb1[cs]))
				debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
					cs, pvt->dcsb1[cs], reg);
		}
	}

	for (cs = 0; cs < pvt->num_dcsm; cs++) {
		reg = K8_DCSM0 + (cs * 4);
		if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs]))
			debugf0("    DCSM0[%d]=0x%08x reg: F2x%x\n",
				cs, pvt->dcsm0[cs], reg);

		/* If DCT are NOT ganged, then read in DCT1's mask */
		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
			reg = F10_DCSM1 + (cs * 4);
			if (!amd64_read_pci_cfg(pvt->F2, reg,
						&pvt->dcsm1[cs]))
				debugf0("    DCSM1[%d]=0x%08x reg: F2x%x\n",
					cs, pvt->dcsm1[cs], reg);
		}
	}
}
static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
{
	enum mem_type type;

	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
		if (pvt->dchr0 & DDR3_MODE)
			type = (pvt->dclr0 & BIT(16)) ?	MEM_DDR3 : MEM_RDDR3;
		else
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
	} else {
		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
	}

	amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);

	return type;
}
/*
 * Read the DRAM Configuration Low register. It differs between CG, D & E revs
 * and the later RevF memory controllers (DDR vs DDR2).
 *
 * Return:
 *	number of memory channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag, err = 0;

	err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
	if (err)
		return err;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & F10_WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}
/* extract the ERROR ADDRESS for the K8 CPUs */
static u64 k8_get_error_address(struct mem_ctl_info *mci,
				struct err_regs *info)
{
	return (((u64) (info->nbeah & 0xff)) << 32) +
			(info->nbeal & ~0x03);
}
/*
 * Read the Base and Limit registers for K8 based Memory controllers; extract
 * fields from the 'raw' reg into separate data fields.
 *
 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
 */
static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
{
	u32 low;
	u32 off = dram << 3;	/* 8 bytes between DRAM entries */

	amd64_read_pci_cfg(pvt->F1, K8_DRAM_BASE_LOW + off, &low);

	/* Extract parts into separate data entries */
	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
	pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
	pvt->dram_rw_en[dram] = (low & 0x3);

	amd64_read_pci_cfg(pvt->F1, K8_DRAM_LIMIT_LOW + off, &low);

	/*
	 * Extract parts into separate data entries. Limit is the HIGHEST
	 * memory location of the region, so the lower 24 bits need to be all
	 * ones.
	 */
	pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
	pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
	pvt->dram_DstNode[dram] = (low & 0x7);
}
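/*
 * Worked example (illustrative): a raw DRAM Base register value of
 * 0x00100003 decodes to RW_EN == 0x3 (read and write enabled),
 * IntlvEn == 0, and a base of ((u64)0x00100000 << 8) == 0x10000000,
 * i.e. 256MB.
 */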
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
				    struct err_regs *err_info, u64 sys_addr)
{
	struct mem_ctl_info *src_mci;
	int channel, csrow;
	u32 page, offset;
	u16 syndrome;

	syndrome = extract_syndrome(err_info);

	/* CHIPKILL enabled */
	if (err_info->nbcfg & K8_NBCFG_CHIPKILL) {
		channel = get_channel_from_ecc_syndrome(mci, syndrome);
		if (channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
					   "error reporting race\n", syndrome);
			edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory. This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		channel = ((sys_addr & BIT(3)) != 0);
	}

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
		return;
	}

	/* Now map the sys_addr to a CSROW */
	csrow = sys_addr_to_csrow(src_mci, sys_addr);
	if (csrow < 0) {
		edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
	} else {
		error_address_to_page_and_offset(sys_addr, &page, &offset);

		edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
				  channel, EDAC_MOD_STR);
	}
}
static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
{
	int *dbam_map;

	if (pvt->ext_model >= K8_REV_F)
		dbam_map = ddr2_dbam;
	else if (pvt->ext_model >= K8_REV_D)
		dbam_map = ddr2_dbam_revD;
	else
		dbam_map = ddr2_dbam_revCG;

	return dbam_map[cs_mode];
}
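/*
 * Example (illustrative, assuming the table values above): on RevF silicon
 * a cs_mode of 0 selects ddr2_dbam[0] == 128 (a 128MB chip select), while
 * the same encoding on RevC/G parts selects ddr2_dbam_revCG[0] == 32
 * (32MB).
 */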
/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f10_early_channel_count(struct amd64_pvt *pvt)
{
	int dbams[] = { DBAM0, DBAM1 };
	int i, j, channels = 0;
	u32 dbam;

	/* If we are in 128 bit mode, then we are using 2 channels */
	if (pvt->dclr0 & F10_WIDTH_128) {
		channels = 2;
		return channels;
	}

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	debugf0("Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < ARRAY_SIZE(dbams); i++) {
		if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam))
			goto err_reg;

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels = 2;
				break;
			}
		}
	}

	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;

err_reg:
	return -1;
}
static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
{
	int *dbam_map;

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		dbam_map = ddr3_dbam;
	else
		dbam_map = ddr2_dbam;

	return dbam_map[cs_mode];
}
static u64 f10_get_error_address(struct mem_ctl_info *mci,
				 struct err_regs *info)
{
	return (((u64) (info->nbeah & 0xffff)) << 32) +
			(info->nbeal & ~0x01);
}
/*
 * Read the Base and Limit registers for F10 based Memory controllers. Extract
 * fields from the 'raw' reg into separate data fields.
 *
 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
 */
static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
{
	u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;

	low_offset = K8_DRAM_BASE_LOW + (dram << 3);
	high_offset = F10_DRAM_BASE_HIGH + (dram << 3);

	/* read the 'raw' DRAM BASE Address register */
	amd64_read_pci_cfg(pvt->F1, low_offset, &low_base);
	amd64_read_pci_cfg(pvt->F1, high_offset, &high_base);

	/* Extract parts into separate data entries */
	pvt->dram_rw_en[dram] = (low_base & 0x3);

	if (pvt->dram_rw_en[dram] == 0)
		return;

	pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;

	pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
			       (((u64)low_base  & 0xFFFF0000) << 8);

	low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
	high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);

	/* read the 'raw' LIMIT registers */
	amd64_read_pci_cfg(pvt->F1, low_offset, &low_limit);
	amd64_read_pci_cfg(pvt->F1, high_offset, &high_limit);

	pvt->dram_DstNode[dram] = (low_limit & 0x7);
	pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;

	/*
	 * Extract address values and form a LIMIT address. Limit is the
	 * HIGHEST memory location of the region, so the low 24 bits need to
	 * be all ones.
	 */
	pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
				(((u64) low_limit & 0xFFFF0000) << 8) |
				0x00FFFFFF;
}
static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
{

	if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW,
				&pvt->dram_ctl_select_low)) {
		debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
			"High range addresses at: 0x%x\n",
			pvt->dram_ctl_select_low,
			dct_sel_baseaddr(pvt));

		debugf0("  DCT mode: %s, All DCTs on: %s\n",
			(dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
			(dct_dram_enabled(pvt) ? "yes" : "no"));

		if (!dct_ganging_enabled(pvt))
			debugf0("  Address range split per DCT: %s\n",
				(dct_high_range_enabled(pvt) ? "yes" : "no"));

		debugf0("  DCT data interleave for ECC: %s, "
			"DRAM cleared since last warm reset: %s\n",
			(dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			(dct_memory_cleared(pvt) ? "yes" : "no"));

		debugf0("  DCT channel interleave: %s, "
			"DCT interleave bits selector: 0x%x\n",
			(dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			dct_sel_interleave_addr(pvt));
	}

	amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH,
			   &pvt->dram_ctl_select_high);
}
/*
 * Determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				 int hi_range_sel, u32 intlv_en)
{
	u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		cs = 0;
	else if (hi_range_sel)
		cs = dct_sel_high;
	else if (dct_interleave_enabled(pvt)) {
		/*
		 * see F2x110[DctSelIntLvAddr] - channel interleave mode
		 */
		if (dct_sel_interleave_addr(pvt) == 0)
			cs = sys_addr >> 6 & 1;
		else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
			temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;

			if (dct_sel_interleave_addr(pvt) & 1)
				cs = (sys_addr >> 9 & 1) ^ temp;
			else
				cs = (sys_addr >> 6 & 1) ^ temp;
		} else if (intlv_en & 4)
			cs = sys_addr >> 15 & 1;
		else if (intlv_en & 2)
			cs = sys_addr >> 14 & 1;
		else if (intlv_en & 1)
			cs = sys_addr >> 13 & 1;
		else
			cs = sys_addr >> 12 & 1;
	} else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
		cs = ~dct_sel_high & 1;
	else
		cs = 0;

	return cs;
}
static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
{
	if (intlv_en == 1)
		return 1;
	else if (intlv_en == 3)
		return 2;
	else if (intlv_en == 7)
		return 3;

	return 0;
}
/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
					   u32 dct_sel_base_addr,
					   u64 dct_sel_base_off,
					   u32 hole_valid, u32 hole_off,
					   u64 dram_base)
{
	u64 chan_off;

	if (hi_range_sel) {
		if (!(dct_sel_base_addr & 0xFFFF0000) &&
		    hole_valid && (sys_addr >= 0x100000000ULL))
			chan_off = hole_off << 16;
		else
			chan_off = dct_sel_base_off;
	} else {
		if (hole_valid && (sys_addr >= 0x100000000ULL))
			chan_off = hole_off << 16;
		else
			chan_off = dram_base & 0xFFFFF8000000ULL;
	}

	return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
			(chan_off & 0x0000FFFFFF800000ULL);
}
/* Hack for the time being - Can we get this from BIOS?? */
#define CH0SPARE_RANK	0
#define CH1SPARE_RANK	1

/*
 * Checks if the csrow passed in is marked as SPARED; if so, returns the new
 * spare row.
 */
static inline int f10_process_possible_spare(int csrow,
					     u32 cs, struct amd64_pvt *pvt)
{
	u32 swap_done;
	u32 bad_dram_cs;

	/* Depending on channel, isolate respective SPARING info */
	if (cs) {
		swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
		if (swap_done && (csrow == bad_dram_cs))
			csrow = CH1SPARE_RANK;
	} else {
		swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
		if (swap_done && (csrow == bad_dram_cs))
			csrow = CH0SPARE_RANK;
	}
	return csrow;
}
/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u32 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = mcis[nid];
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	debugf1("InputAddr=0x%x  channelselect=%d\n", in_addr, cs);

	for (csrow = 0; csrow < pvt->cs_count; csrow++) {

		cs_base = amd64_get_dct_base(pvt, cs, csrow);
		if (!(cs_base & K8_DCSB_CS_ENABLE))
			continue;

		/*
		 * We have an ENABLED CSROW, Isolate just the MASK bits of the
		 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
		 * of the actual address.
		 */
		cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;

		/*
		 * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
		 * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
		 */
		cs_mask = amd64_get_dct_mask(pvt, cs, csrow);

		debugf1("    CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
			csrow, cs_base, cs_mask);

		cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;

		debugf1("              Final CSMask=0x%x\n", cs_mask);
		debugf1("    (InputAddr & ~CSMask)=0x%x "
			"(CSBase & ~CSMask)=0x%x\n",
			(in_addr & ~cs_mask), (cs_base & ~cs_mask));

		if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
			cs_found = f10_process_possible_spare(csrow, cs, pvt);

			debugf1(" MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}
/* For a given @dram_range, check if @sys_addr falls within it. */
static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
				  u64 sys_addr, int *nid, int *chan_sel)
{
	int node_id, cs_found = -EINVAL, high_range = 0;
	u32 intlv_en, intlv_sel, intlv_shift, hole_off;
	u32 hole_valid, tmp, dct_sel_base, channel;
	u64 dram_base, chan_addr, dct_sel_base_off;

	dram_base = pvt->dram_base[dram_range];
	intlv_en = pvt->dram_IntlvEn[dram_range];

	node_id = pvt->dram_DstNode[dram_range];
	intlv_sel = pvt->dram_IntlvSel[dram_range];

	debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
		dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);

	/*
	 * This assumes that one node's DHAR is the same as all the other
	 * nodes' DHAR.
	 */
	hole_off = (pvt->dhar & 0x0000FF80);
	hole_valid = (pvt->dhar & 0x1);
	dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;

	debugf1("   HoleOffset=0x%x  HoleValid=0x%x IntlvSel=0x%x\n",
		hole_off, hole_valid, intlv_sel);

	if (intlv_en &&
	    (intlv_sel != ((sys_addr >> 12) & intlv_en)))
		return -EINVAL;

	dct_sel_base = dct_sel_baseaddr(pvt);

	/*
	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	 */
	if (dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt) &&
	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
		high_range = 1;

	channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
					     dct_sel_base_off, hole_valid,
					     hole_off, dram_base);

	intlv_shift = f10_map_intlv_en_to_shift(intlv_en);

	/* remove Node ID (in case of memory interleaving) */
	tmp = chan_addr & 0xFC0;

	chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;

	/* remove channel interleave and hash */
	if (dct_interleave_enabled(pvt) &&
	   !dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt)) {
		if (dct_sel_interleave_addr(pvt) != 1)
			chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
		else {
			tmp = chan_addr & 0xFC0;
			chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
					| tmp;
		}
	}

	debugf1("   (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
		chan_addr, (u32)(chan_addr >> 8));

	cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);

	if (cs_found >= 0) {
		*nid = node_id;
		*chan_sel = channel;
	}
	return cs_found;
}
static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
				       int *node, int *chan_sel)
{
	int dram_range, cs_found = -EINVAL;
	u64 dram_base, dram_limit;

	for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {

		if (!pvt->dram_rw_en[dram_range])
			continue;

		dram_base = pvt->dram_base[dram_range];
		dram_limit = pvt->dram_limit[dram_range];

		if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {

			cs_found = f10_match_to_this_node(pvt, dram_range,
							  sys_addr, node,
							  chan_sel);
			if (cs_found >= 0)
				break;
		}
	}
	return cs_found;
}
/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
 */
static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
				     struct err_regs *err_info,
				     u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 page, offset;
	int nid, csrow, chan = 0;
	u16 syndrome;

	csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);

	if (csrow < 0) {
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
		return;
	}

	error_address_to_page_and_offset(sys_addr, &page, &offset);

	syndrome = extract_syndrome(err_info);

	/*
	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
	if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
		chan = get_channel_from_ecc_syndrome(mci, syndrome);

	if (chan >= 0)
		edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
				  EDAC_MOD_STR);
	else
		/*
		 * Channel unknown, report all channels on this CSROW as failed.
		 */
		for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
			edac_mc_handle_ce(mci, page, offset, syndrome,
					  csrow, chan, EDAC_MOD_STR);
}
/*
 * debug routine to display the memory sizes of all logical DIMMs and its
 * CSROWs as well.
 */
static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
{
	int dimm, size0, size1, factor = 0;
	u32 dbam;
	u32 *dcsb;

	if (boot_cpu_data.x86 == 0xf) {
		if (pvt->dclr0 & F10_WIDTH_128)
			factor = 1;

		/* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)
			return;
		else
			WARN_ON(ctrl != 0);
	}

	debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
		ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);

	dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
	dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;

	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

	/* Dump memory sizes for DIMM and its CSROWs */
	for (dimm = 0; dimm < 4; dimm++) {

		size0 = 0;
		if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
			size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));

		size1 = 0;
		if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
			size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
			   dimm * 2, size0 << factor,
			   dimm * 2 + 1, size1 << factor);
	}
}
static struct amd64_family_type amd64_family_types[] = {
	[K8_CPUS] = {
		.ctl_name = "K8",
		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
		.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
		.ops = {
			.early_channel_count	= k8_early_channel_count,
			.get_error_address	= k8_get_error_address,
			.read_dram_base_limit	= k8_read_dram_base_limit,
			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
			.dbam_to_cs		= k8_dbam_to_chip_select,
		}
	},
	[F10_CPUS] = {
		.ctl_name = "F10h",
		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
		.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
		.ops = {
			.early_channel_count	= f10_early_channel_count,
			.get_error_address	= f10_get_error_address,
			.read_dram_base_limit	= f10_read_dram_base_limit,
			.read_dram_ctl_register	= f10_read_dram_ctl_register,
			.map_sysaddr_to_csrow	= f10_map_sysaddr_to_csrow,
			.dbam_to_cs		= f10_dbam_to_chip_select,
		}
	},
};
static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	dev = pci_get_device(vendor, device, dev);
	while (dev) {
		if ((dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
		dev = pci_get_device(vendor, device, dev);
	}

	return dev;
}
/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
static u16 x4_vectors[] = {
	0x2f57, 0x1afe, 0x66cc, 0xdd88,
	0x11eb, 0x3396, 0x7f4c, 0xeac8,
	0x0001, 0x0002, 0x0004, 0x0008,
	0x1013, 0x3032, 0x4044, 0x8088,
	0x106b, 0x30d6, 0x70fc, 0xe0a8,
	0x4857, 0xc4fe, 0x13cc, 0x3288,
	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
	0x15c1, 0x2a42, 0x89ac, 0x4758,
	0x2b03, 0x1602, 0x4f0c, 0xca08,
	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
	0x8ba7, 0x465e, 0x244c, 0x1cc8,
	0x2b87, 0x164e, 0x642c, 0xdc18,
	0x40b9, 0x80de, 0x1094, 0x20e8,
	0x27db, 0x1eb6, 0x9dac, 0x7b58,
	0x11c1, 0x2242, 0x84ac, 0x4c58,
	0x1be5, 0x2d7a, 0x5e34, 0xa718,
	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
	0x4c97, 0xc87e, 0x11fc, 0x33a8,
	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
	0x16b3, 0x3d62, 0x4f34, 0x8518,
	0x1e2f, 0x391a, 0x5cac, 0xf858,
	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
	0x4397, 0xc27e, 0x17fc, 0x3ea8,
	0x1617, 0x3d3e, 0x6464, 0xb8b8,
	0x23ff, 0x12aa, 0xab6c, 0x56d8,
	0x2dfb, 0x1ba6, 0x913c, 0x7328,
	0x185d, 0x2ca6, 0x7914, 0x9e28,
	0x171b, 0x3e36, 0x7d7c, 0xebe8,
	0x4199, 0x82ee, 0x19f4, 0x2e58,
	0x4807, 0xc40e, 0x130c, 0x3208,
	0x1905, 0x2e0a, 0x5804, 0xac08,
	0x213f, 0x132a, 0xadfc, 0x5ba8,
	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};
static u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
			   int v_dim)
{
	unsigned int i, err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		u16 s = syndrome;
		int v_idx =  err_sym * v_dim;
		int v_end = (err_sym + 1) * v_dim;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and bit set in the modified syndrome, */
				if (s & i) {
					/* remove it. */
					s ^= ev_comp;

					if (!s)
						return err_sym;
				}

			} else if (s & i)
				/* can't get to zero, move to next symbol */
				break;
		}
	}

	debugf0("syndrome(%x) not found\n", syndrome);
	return -1;
}
static int map_err_sym_to_channel(int err_sym, int sym_size)
{
	if (sym_size == 4)
		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;
		case 0x22:
		case 0x23:
			return 1;
		default:
			return err_sym >> 4;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
			     err_sym);
			return -1;
		case 0x11:
			return 0;
		case 0x12:
			return 1;
		default:
			return err_sym >> 3;
		}
}
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int err_sym = -1;

	if (pvt->syn_type == 8)
		err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
					  pvt->syn_type);
	else if (pvt->syn_type == 4)
		err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
					  pvt->syn_type);
	else {
		amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);
		return err_sym;
	}

	return map_err_sym_to_channel(err_sym, pvt->syn_type);
}
/*
 * Handle any Correctable Errors (CEs) that have occurred. Check for valid
 * ERROR ADDRESS and process.
 */
static void amd64_handle_ce(struct mem_ctl_info *mci,
			    struct err_regs *info)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 sys_addr;

	/* Ensure that the Error Address is VALID */
	if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
		return;
	}

	sys_addr = pvt->ops->get_error_address(mci, info);

	amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);

	pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
}
/* Handle any Un-correctable Errors (UEs) */
static void amd64_handle_ue(struct mem_ctl_info *mci,
			    struct err_regs *info)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	struct mem_ctl_info *log_mci, *src_mci = NULL;
	int csrow;
	u64 sys_addr;
	u32 page, offset;

	log_mci = mci;

	if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
		return;
	}

	sys_addr = pvt->ops->get_error_address(mci, info);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!src_mci) {
		amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
			     (unsigned long)sys_addr);
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
		return;
	}

	log_mci = src_mci;

	csrow = sys_addr_to_csrow(log_mci, sys_addr);
	if (csrow < 0) {
		amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
			     (unsigned long)sys_addr);
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
	} else {
		error_address_to_page_and_offset(sys_addr, &page, &offset);
		edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
	}
}
static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
					    struct err_regs *info)
{
	u32 ec  = ERROR_CODE(info->nbsl);
	u32 xec = EXT_ERROR_CODE(info->nbsl);
	int ecc_type = (info->nbsh >> 13) & 0x3;

	/* Bail early out if this was an 'observed' error */
	if (PP(ec) == K8_NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	if (ecc_type == 2)
		amd64_handle_ce(mci, info);
	else if (ecc_type == 1)
		amd64_handle_ue(mci, info);
}
void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
{
	struct mem_ctl_info *mci = mcis[node_id];
	struct err_regs regs;

	regs.nbsl  = (u32) m->status;
	regs.nbsh  = (u32)(m->status >> 32);
	regs.nbeal = (u32) m->addr;
	regs.nbeah = (u32)(m->addr >> 32);
	regs.nbcfg = nbcfg;

	__amd64_decode_bus_error(mci, &regs);

	/*
	 * Check the UE bit of the NB status high register, if set generate some
	 * logs. If NOT a GART error, then process the event as a NO-INFO event.
	 * If it was a GART error, skip that process.
	 *
	 * FIXME: this should go somewhere else, if at all.
	 */
	if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
		edac_mc_handle_ue_no_info(mci, "UE bit is set");
}
/*
 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
 */
static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, u16 f1_id,
					    u16 f3_id)
{
	/* Reserve the ADDRESS MAP Device */
	pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
	if (!pvt->F1) {
		amd64_err("error address map device not found: "
			  "vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f1_id);
		return -ENODEV;
	}

	/* Reserve the MISC Device */
	pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
	if (!pvt->F3) {
		pci_dev_put(pvt->F1);
		pvt->F1 = NULL;

		amd64_err("error F3 device not found: "
			  "vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f3_id);

		return -ENODEV;
	}

	debugf1("F1: %s\n", pci_name(pvt->F1));
	debugf1("F2: %s\n", pci_name(pvt->F2));
	debugf1("F3: %s\n", pci_name(pvt->F3));

	return 0;
}
static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
{
	pci_dev_put(pvt->F1);
	pci_dev_put(pvt->F3);
}
/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void amd64_read_mc_registers(struct amd64_pvt *pvt)
{
	u64 msr_val;
	u32 tmp;
	int dram;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	debugf0("  TOP_MEM:  0x%016llx\n", pvt->top_mem);

	/* check first whether TOP_MEM2 is enabled */
	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & (1U << 21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		debugf0("  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else
		debugf0("  TOP_MEM2 disabled.\n");

	amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap);

	if (pvt->ops->read_dram_ctl_register)
		pvt->ops->read_dram_ctl_register(pvt);

	for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
		/*
		 * Call CPU specific READ function to get the DRAM Base and
		 * Limit values from the DCT.
		 */
		pvt->ops->read_dram_base_limit(pvt, dram);

		/*
		 * Only print out debug info on rows with both R and W Enabled.
		 * Normal processing, compiler should optimize this whole 'if'
		 * debug output block away.
		 */
		if (pvt->dram_rw_en[dram] != 0) {
			debugf1("  DRAM-BASE[%d]: 0x%016llx "
				"DRAM-LIMIT:  0x%016llx\n",
				dram,
				pvt->dram_base[dram],
				pvt->dram_limit[dram]);

			debugf1("        IntlvEn=%s %s %s "
				"IntlvSel=%d DstNode=%d\n",
				pvt->dram_IntlvEn[dram] ?
					"Enabled" : "Disabled",
				(pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
				(pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
				pvt->dram_IntlvSel[dram],
				pvt->dram_DstNode[dram]);
		}
	}

	amd64_read_dct_base_mask(pvt);

	amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar);
	amd64_read_dbam_reg(pvt);

	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
	amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0);

	if (boot_cpu_data.x86 >= 0x10) {
		if (!dct_ganging_enabled(pvt)) {
			amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1);
			amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1);
		}
		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
	}

	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model > 7 &&
	    /* F3x180[EccSymbolSize]=1 => x8 symbols */
	    tmp & BIT(25))
		pvt->syn_type = 8;
	else
		pvt->syn_type = 4;

	amd64_dump_misc_regs(pvt);
}
2123 * NOTE: CPU Revision Dependent code
2126 * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
2127 * k8 private pointer to -->
2128 * DRAM Bank Address mapping register
2130 * DCL register where dual_channel_active is
2132 * The DBAM register consists of 4 sets of 4 bits each definitions:
2135 * 0-3 CSROWs 0 and 1
2136 * 4-7 CSROWs 2 and 3
2137 * 8-11 CSROWs 4 and 5
2138 * 12-15 CSROWs 6 and 7
2140 * Values range from: 0 to 15
2141 * The meaning of the values depends on CPU revision and dual-channel state,
2142 * see relevant BKDG more info.
2144 * The memory controller provides for total of only 8 CSROWs in its current
2145 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2146 * single channel or two (2) DIMMs in dual channel mode.
2148 * The following code logic collapses the various tables for CSROW based on CPU
2152 * The number of PAGE_SIZE pages on the specified CSROW number it
2156 static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
2158 u32 cs_mode, nr_pages;
2161 * The math on this doesn't look right on the surface because x/2*4 can
2162 * be simplified to x*2 but this expression makes use of the fact that
2163 * it is integral math where 1/2=0. This intermediate value becomes the
2164 * number of bits to shift the DBAM register to extract the proper CSROW
2167 cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
	/* dbam_to_cs() returns the chip-select size in MiB; convert to pages. */
	nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);

	/*
	 * If dual channel, then double the memory size of the single channel.
	 * Channel count is 1 or 2.
	 */
	nr_pages <<= (pvt->channel_count - 1);

	debugf0("  (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
	debugf0("    nr_pages= %u  channel-count = %d\n",
		nr_pages, pvt->channel_count);

	return nr_pages;
}
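
/*
 * Worked numbers for amd64_csrow_nr_pages() above (illustrative, assuming
 * PAGE_SHIFT is 12): a dbam_to_cs() result of 128 (MiB) yields
 * 128 << (20 - 12) = 32768 pages, and dual-channel operation
 * (channel_count == 2) doubles that to 65536 pages.
 */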
/*
 * Initialize the array of csrow attribute instances, based on the values
 * from PCI config hardware registers.
 */
static int amd64_init_csrows(struct mem_ctl_info *mci)
{
	struct csrow_info *csrow;
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 input_addr_min, input_addr_max, sys_addr;
	u32 val;
	int i, empty = 1;

	amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val);

	pvt->nbcfg = val;
	pvt->ctl_error_info.nbcfg = val;

	debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
		pvt->mc_node_id, val,
		!!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE));

	for (i = 0; i < pvt->cs_count; i++) {
		csrow = &mci->csrows[i];

		if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
			debugf1("----CSROW %d EMPTY for node %d\n", i,
				pvt->mc_node_id);
			continue;
		}

		debugf1("----CSROW %d VALID for MC node %d\n",
			i, pvt->mc_node_id);

		empty = 0;
		csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
		find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
		sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
		csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
		sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
		csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
		csrow->page_mask = ~mask_from_dct_mask(pvt, i);
		/* 8 bytes of resolution */
		csrow->grain = 8;
		csrow->mtype = amd64_determine_memory_type(pvt, i);

		debugf1("  for MC node %d csrow %d:\n", pvt->mc_node_id, i);
		debugf1("    input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
			(unsigned long)input_addr_min,
			(unsigned long)input_addr_max);
		debugf1("    sys_addr: 0x%lx  page_mask: 0x%lx\n",
			(unsigned long)sys_addr, csrow->page_mask);
		debugf1("    nr_pages: %u  first_page: 0x%lx "
			"last_page: 0x%lx\n",
			(unsigned)csrow->nr_pages,
			csrow->first_page, csrow->last_page);

		/*
		 * Determine whether CHIPKILL, JUST ECC, or NO ECC is operating.
		 */
		if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
			csrow->edac_mode =
			    (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
			    EDAC_S4ECD4ED : EDAC_SECDED;
		else
			csrow->edac_mode = EDAC_NONE;
	}

	return empty;
}
/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (amd_get_nb_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}

/* check MCG_CTL on all the cpus on this node */
static bool amd64_nb_mce_bank_enabled_on_node(int nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);
	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);

		nbe = reg->l & K8_MSR_MCGCTL_NBE;
		debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			cpu, reg->q, (nbe ? "enabled" : "disabled"));
		if (!nbe)
			goto out;
	}
	ret = true;
out:
	free_cpumask_var(mask);
	return ret;
}
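
/*
 * Illustrative userspace check of the same condition (assuming the
 * msr-tools package and a loaded msr.ko): "rdmsr -p <cpu> 0x17b" reads
 * MSR_IA32_MCG_CTL on that core; bit 4 (K8_MSR_MCGCTL_NBE) must be set
 * for NB MCE reporting to be considered enabled.
 */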
static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, nid);
	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			/* remember whether the BIOS had it on already */
			if (reg->l & K8_MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= K8_MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off
			 * before.
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~K8_MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);
	return 0;
}
static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

	if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}

	amd64_read_pci_cfg(F3, K8_NBCTL, &value);

	/* turn on UECCEn and CECCEn bits */
	s->old_nbctl   = value & mask;
	s->nbctl_valid = true;

	value |= mask;
	pci_write_config_dword(F3, K8_NBCTL, value);

	amd64_read_pci_cfg(F3, K8_NBCFG, &value);

	debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
		nid, value,
		!!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));

	if (!(value & K8_NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		s->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= K8_NBCFG_ECC_ENABLE;
		pci_write_config_dword(F3, K8_NBCFG, value);

		amd64_read_pci_cfg(F3, K8_NBCFG, &value);

		if (!(value & K8_NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
			ret = false;
		} else {
			amd64_info("Hardware accepted DRAM ECC Enable\n");
		}
	} else {
		s->flags.nb_ecc_prev = 1;
	}

	debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
		nid, value,
		!!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));

	return ret;
}
static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
{
	u8 nid = pvt->mc_node_id;
	struct ecc_settings *s = ecc_stngs[nid];
	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(pvt->F3, K8_NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	pci_write_config_dword(pvt->F3, K8_NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value);
		value &= ~K8_NBCFG_ECC_ENABLE;
		pci_write_config_dword(pvt->F3, K8_NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}
/*
 * EDAC requires that the BIOS have ECC enabled before taking over the
 * processing of ECC errors. A command line option allows force-enabling
 * hardware ECC later in enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";
static bool ecc_enabled(struct pci_dev *F3, u8 nid)
{
	u32 value;
	u8 ecc_en = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(F3, K8_NBCFG, &value);

	ecc_en = !!(value & K8_NBCFG_ECC_ENABLE);
	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
	if (!nb_mce_en)
		amd64_notice("NB MCE bank disabled, set MSR "
			     "0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, nid);

	if (!ecc_en || !nb_mce_en) {
		amd64_notice("%s", ecc_msg);
		return false;
	}
	return true;
}
struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
					  ARRAY_SIZE(amd64_inj_attrs) +
					  1];

struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };

static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
{
	unsigned int i = 0, j = 0;

	for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
		sysfs_attrs[i] = amd64_dbg_attrs[i];

	for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
		sysfs_attrs[i] = amd64_inj_attrs[j];

	sysfs_attrs[i] = terminator;

	mci->mc_driver_sysfs_attributes = sysfs_attrs;
}
static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & K8_NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & K8_NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= amd64_determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= pvt->ctl_name;
	mci->dev_name		= pci_name(pvt->F2);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}
/*
 * Returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
{
	u8 fam = boot_cpu_data.x86;
	struct amd64_family_type *fam_type = NULL;

	switch (fam) {
	case 0xf:
		fam_type		= &amd64_family_types[K8_CPUS];
		pvt->ops		= &amd64_family_types[K8_CPUS].ops;
		pvt->ctl_name		= fam_type->ctl_name;
		pvt->min_scrubrate	= K8_MIN_SCRUB_RATE_BITS;
		break;
	case 0x10:
		fam_type		= &amd64_family_types[F10_CPUS];
		pvt->ops		= &amd64_family_types[F10_CPUS].ops;
		pvt->ctl_name		= fam_type->ctl_name;
		pvt->min_scrubrate	= F10_MIN_SCRUB_RATE_BITS;
		break;
	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	/* extended model is bits [7:4] of the model field */
	pvt->ext_model = boot_cpu_data.x86_model >> 4;

	amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
		   (fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);

	return fam_type;
}
static int amd64_init_one_instance(struct pci_dev *F2)
{
	struct amd64_pvt *pvt = NULL;
	struct amd64_family_type *fam_type = NULL;
	int err = -ENOMEM;

	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		return err;

	pvt->mc_node_id = get_node_id(F2);
	pvt->F2 = F2;

	err = -EINVAL;
	fam_type = amd64_per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	err = amd64_reserve_mc_sibling_devices(pvt, fam_type->f1_id,
					       fam_type->f3_id);
	if (err)
		goto err_free;

	/*
	 * Save the pointer to the private data for use in 2nd initialization
	 * stage.
	 */
	pvts[pvt->mc_node_id] = pvt;
	return 0;

err_free:
	kfree(pvt);
	return err;
}
/*
 * This is the finishing stage of the init code. Needs to be performed after all
 * MCs' hardware have been prepped for accessing extended config space.
 */
static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
{
	int node_id = pvt->mc_node_id;
	struct mem_ctl_info *mci;
	int ret;

	amd64_read_mc_registers(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_exit;

	ret = -ENOMEM;
	mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
	if (!mci)
		goto err_exit;

	mci->pvt_info = pvt;
	mci->dev = &pvt->F2->dev;

	amd64_setup_mci_misc_attributes(mci);

	if (amd64_init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	amd64_set_mc_sysfs_attributes(mci);

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		debugf1("failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	mcis[node_id] = mci;
	pvts[node_id] = NULL;

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(amd64_decode_bus_error);

	return 0;

err_add_mc:
	edac_mc_free(mci);

err_exit:
	debugf0("failure to init 2nd stage: ret=%d\n", ret);

	amd64_restore_ecc_error_reporting(pvt);
	amd64_free_mc_sibling_devices(pvt);

	kfree(pvts[pvt->mc_node_id]);
	pvts[node_id] = NULL;

	return ret;
}
static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
					      const struct pci_device_id *mc_type)
{
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret < 0) {
		debugf0("ret=%d\n", ret);
		return -EIO;
	}

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = -ENODEV;
		if (!ecc_enable_override)
			goto err_enable;

		amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = amd64_init_one_instance(pdev);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);
		goto err_enable;
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;

err_out:
	return ret;
}
static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	amd64_restore_ecc_error_reporting(pvt);
	amd64_free_mc_sibling_devices(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);

	kfree(ecc_stngs[pvt->mc_node_id]);
	ecc_stngs[pvt->mc_node_id] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;
	mcis[pvt->mc_node_id] = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}
/*
 * This table is part of the interface for loading drivers for PCI devices. The
 * PCI core identifies what devices are on a system during boot, and then
 * inquires this table to see if this driver is for a given device found.
 */
static const struct pci_device_id amd64_pci_table[] __devinitdata = {
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
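
/*
 * MODULE_DEVICE_TABLE() exports the IDs above as module aliases, so that
 * depmod/udev/modprobe can autoload this driver when a matching AMD
 * northbridge function (vendor 0x1022) is discovered at boot.
 */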
static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= amd64_probe_one_instance,
	.remove		= __devexit_p(amd64_remove_one_instance),
	.id_table	= amd64_pci_table,
};

static void amd64_setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (amd64_ctl_pci)
		return;

	mci = mcis[0];
	if (mci) {
		pvt = mci->pvt_info;
		amd64_ctl_pci =
			edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);

		if (!amd64_ctl_pci) {
			pr_warning("%s(): Unable to create PCI control\n",
				   __func__);
			pr_warning("%s(): PCI error report via EDAC not set\n",
				   __func__);
		}
	}
}
static int __init amd64_edac_init(void)
{
	int nb, err = -ENODEV;
	bool load_ok = false;

	edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");

	opstate_init();

	if (amd_cache_northbridges() < 0)
		goto err_ret;

	err = -ENOMEM;
	pvts	  = kzalloc(amd_nb_num() * sizeof(pvts[0]), GFP_KERNEL);
	mcis	  = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!(pvts && mcis && ecc_stngs))
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		goto err_pci;

	/*
	 * At this point, the array 'pvts[]' contains pointers to alloc'd
	 * amd64_pvt structs. These will be used in the 2nd stage init function
	 * to finish initialization of the MC instances.
	 */
	err = -ENODEV;
	for (nb = 0; nb < amd_nb_num(); nb++) {
		if (!pvts[nb])
			continue;

		err = amd64_init_2nd_stage(pvts[nb]);
		if (err)
			goto err_2nd_stage;

		load_ok = true;
	}

	if (load_ok) {
		amd64_setup_pci_device();
		return 0;
	}

err_2nd_stage:
	pci_unregister_driver(&amd64_pci_driver);
err_pci:
	msrs_free(msrs);
	msrs = NULL;
err_free:
	kfree(pvts);
	kfree(mcis);
	kfree(ecc_stngs);
err_ret:
	return err;
}
static void __exit amd64_edac_exit(void)
{
	if (amd64_ctl_pci)
		edac_pci_release_generic_ctl(amd64_ctl_pci);

	pci_unregister_driver(&amd64_pci_driver);

	kfree(ecc_stngs);
	kfree(mcis);
	kfree(pvts);

	msrs_free(msrs);
	msrs = NULL;
}
module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
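
/*
 * Illustrative load-time invocation combining this module's parameters:
 *
 *	modprobe amd64_edac edac_op_state=0 report_gart_errors=1 \
 *		ecc_enable_override=1
 */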