#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *amd64_ctl_pci;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/* count successfully initialized driver instances for setup_pci_device() */
static atomic_t drv_instances = ATOMIC_INIT(0);

/* Per-node driver instances */
static struct mem_ctl_info **mcis;
static struct ecc_settings **ecc_stngs;

/*
 * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
 * later.
 */
static int ddr2_dbam_revCG[] = {
static int ddr2_dbam_revD[] = {
static int ddr2_dbam[] = { [0] = 128,
static int ddr3_dbam[] = { [0] = -1,
/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
struct scrubrate {
        u32 scrubval;           /* bit pattern for scrub rate */
        u32 bandwidth;          /* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
        { 0x01, 1600000000UL},
        { 0x00, 0UL},           /* scrubbing off */
};
static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
                                      u32 *val, const char *func)
        err = pci_read_config_dword(pdev, offset, val);
        if (err)
                amd64_warn("%s: error reading F%dx%03x.\n",
                           func, PCI_FUNC(pdev->devfn), offset);

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
                                u32 val, const char *func)
        err = pci_write_config_dword(pdev, offset, val);
        if (err)
                amd64_warn("%s: error writing to F%dx%03x.\n",
                           func, PCI_FUNC(pdev->devfn), offset);
/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only
 *
 * F10h: each DCT has its own set of regs
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
                               const char *func)
        return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);

static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
                                const char *func)
        return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);

static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
                                const char *func)
        if (addr >= 0x140 && addr <= 0x1a0) {

        amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
        amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);

        return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, then use the last maximum value found.
 */
static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
        /*
         * map the configured rate (new_bw) to a value specific to the AMD64
         * memory controller and apply to register. Search for the first
         * bandwidth entry that is greater than or equal to the requested
         * setting and program that. If at last entry, turn off DRAM scrubbing.
         */
        for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
                /*
                 * skip scrub rates which aren't recommended
                 * (see F10 BKDG, F3x58)
                 */
                if (scrubrates[i].scrubval < min_rate)
                        continue;

                if (scrubrates[i].bandwidth <= new_bw)
                        break;

                /*
                 * if no suitable bandwidth found, turn off DRAM scrubbing
                 * entirely by falling back to the last element in the
                 * scrubrates array.
                 */
        }

        scrubval = scrubrates[i].scrubval;

        pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);

        return scrubrates[i].bandwidth;
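/*
 * Worked example (illustrative, not from the BKDG): scrubrates[] is ordered
 * from the highest bandwidth down to "off", so a request of new_bw =
 * 1600000000 (1.6 GB/s) stops at the first entry whose bandwidth is <=
 * new_bw and programs its scrubval (0x01 above). A request smaller than
 * every listed rate falls through to the final { 0x00, 0UL } entry, i.e.
 * scrubbing is switched off.
 */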
static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
        struct amd64_pvt *pvt = mci->pvt_info;

        return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);

static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
        struct amd64_pvt *pvt = mci->pvt_info;
        int i, retval = -EINVAL;

        amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);

        scrubval = scrubval & 0x001F;

        amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval);

        for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
                if (scrubrates[i].scrubval == scrubval) {
                        retval = scrubrates[i].bandwidth;
/* Map from a CSROW entry to the mask entry that operates on it */
static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
        if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)

/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
                return pvt->dcsb0[csrow];
                return pvt->dcsb1[csrow];

/*
 * Return the 'mask' address the i'th CS entry. This function is needed because
 * the number of DCSM registers on Rev E and prior vs. Rev F and later is
 * different.
 */
static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
                return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
                return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, int nid)

        /*
         * The K8 treats this as a 40-bit value. However, bits 63-40 will be
         * all ones if the most significant implemented address bit is 1.
         * Here we discard bits 63-40. See section 3.4.2 of AMD publication
         * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1,
         * Application Programming.
         */
        addr = sys_addr & 0x000000ffffffffffull;

        return ((addr >= get_dram_base(pvt, nid)) &&
                (addr <= get_dram_limit(pvt, nid)));

/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
        struct amd64_pvt *pvt;

        /*
         * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
         * 3.4.4.2) registers to map the SysAddr to a node ID.
         */

        /*
         * The value of this field should be the same for all DRAM Base
         * registers. Therefore we arbitrarily choose to read it from the
         * register for node 0.
         */
        intlv_en = dram_intlv_en(pvt, 0);

        for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
                if (amd64_base_limit_match(pvt, sys_addr, node_id))

        if (unlikely((intlv_en != 0x01) &&
                     (intlv_en != 0x03) &&
                     (intlv_en != 0x07))) {
                amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);

        bits = (((u32) sys_addr) >> 12) & intlv_en;
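        /*
         * Illustrative example (not from the BKDG text): with intlv_en ==
         * 0x03, i.e. four-node interleaving, "bits" holds SysAddr[13:12];
         * the loop below selects the node whose DRAM Limit[IntlvSel] field
         * equals those bits.
         */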
        for (node_id = 0; ; ) {
                if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
                        break;  /* intlv_sel field matches */

                if (++node_id >= DRAM_RANGES)

        /* sanity test for sys_addr */
        if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
                amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
                           "range for node %d with node interleaving enabled.\n",
                           __func__, sys_addr, node_id);

        return edac_mc_find(node_id);

        debugf2("sys_addr 0x%lx doesn't match any node\n",
                (unsigned long)sys_addr);
/*
 * Extract the DRAM CS base address from selected csrow register.
 */
static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
        return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
                pvt->dcs_shift;

/*
 * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
 */
static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
        u64 dcsm_bits, other_bits;

        /* Extract bits from DRAM CS Mask. */
        dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;

        other_bits = pvt->dcsm_mask;
        other_bits = ~(other_bits << pvt->dcs_shift);

        /*
         * The extracted bits from DCSM belong in the spaces represented by
         * the cleared bits in other_bits.
         */
        mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
        struct amd64_pvt *pvt;

        /*
         * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
         * base/mask register pair, test the condition shown near the start of
         * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
         */
        for (csrow = 0; csrow < pvt->cs_count; csrow++) {

                /* This DRAM chip select is disabled on this node */
                if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
                        continue;

                base = base_from_dct_base(pvt, csrow);
                mask = ~mask_from_dct_mask(pvt, csrow);

                if ((input_addr & mask) == (base & mask)) {
                        debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
                                (unsigned long)input_addr, csrow,
                                pvt->mc_node_id);

        debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
                (unsigned long)input_addr, pvt->mc_node_id);
/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
                             u64 *hole_offset, u64 *hole_size)
        struct amd64_pvt *pvt = mci->pvt_info;

        /* only revE and later have the DRAM Hole Address Register */
        if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
                debugf1(" revision %d for node %d does not support DHAR\n",
                        pvt->ext_model, pvt->mc_node_id);

        /* only valid for Fam10h */
        if (boot_cpu_data.x86 == 0x10 &&
            (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
                debugf1(" Dram Memory Hoisting is DISABLED on this system\n");

        if ((pvt->dhar & DHAR_VALID) == 0) {
                debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
                        pvt->mc_node_id);

        /* This node has Memory Hoisting */

        /* +------------------+--------------------+--------------------+-----
         * | memory           | DRAM hole          | relocated          |
         * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
         * |                  |                    | DRAM hole          |
         * |                  |                    | [0x100000000,      |
         * |                  |                    |  (0x100000000+     |
         * |                  |                    |  (0xffffffff-x))]  |
         * +------------------+--------------------+--------------------+-----
         *
         * Above is a diagram of physical memory showing the DRAM hole and the
         * relocated addresses from the DRAM hole. As shown, the DRAM hole
         * starts at address x (the base address) and extends through address
         * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
         * addresses in the hole so that they start at 0x100000000.
         */

        base = dhar_base(pvt->dhar);

        *hole_base = base;
        *hole_size = (0x1ull << 32) - base;

        if (boot_cpu_data.x86 > 0xf)
                *hole_offset = f10_dhar_offset(pvt->dhar);
        else
                *hole_offset = k8_dhar_offset(pvt->dhar);

        debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
                pvt->mc_node_id, (unsigned long)*hole_base,
                (unsigned long)*hole_offset, (unsigned long)*hole_size);
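        /*
         * Worked example (illustrative numbers, not from the pub): if the
         * DHAR base field decodes to x = 0xe0000000, then *hole_base =
         * 0xe0000000 and *hole_size = (1ULL << 32) - 0xe0000000 =
         * 0x20000000 (512 MB); per the diagram above, accesses to
         * [0xe0000000, 0xffffffff] are hoisted to [0x100000000, 0x11fffffff].
         */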
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);

/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the SysAddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p. 70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
        struct amd64_pvt *pvt = mci->pvt_info;
        u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;

        dram_base = get_dram_base(pvt, pvt->mc_node_id);

        ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
                                       &hole_size);
                if ((sys_addr >= (1ull << 32)) &&
                    (sys_addr < ((1ull << 32) + hole_size))) {
                        /* use DHAR to translate SysAddr to DramAddr */
                        dram_addr = sys_addr - hole_offset;

                        debugf2("using DHAR to translate SysAddr 0x%lx to "
                                "DramAddr 0x%lx\n",
                                (unsigned long)sys_addr,
                                (unsigned long)dram_addr);

        /*
         * Translate the SysAddr to a DramAddr as shown near the start of
         * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
         * only deals with 40-bit values. Therefore we discard bits 63-40 of
         * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
         * discard are all 1s. Otherwise the bits we discard are all 0s. See
         * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
         * Programmer's Manual Volume 1, Application Programming.
         */
        dram_addr = (sys_addr & 0xffffffffffull) - dram_base;

        debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
                "DramAddr 0x%lx\n", (unsigned long)sys_addr,
                (unsigned long)dram_addr);
/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
        static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };

        BUG_ON(intlv_en > 7);
        n = intlv_shift_table[intlv_en];
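        /*
         * Example reading of the table above (illustrative): IntlvEn is a
         * bit mask, so intlv_en == 1 means 2-node interleaving (1 address
         * bit), 3 means 4 nodes (2 bits) and 7 means 8 nodes (3 bits);
         * every other encoding is invalid and maps to 0.
         */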
/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
        struct amd64_pvt *pvt;

        /*
         * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
         * concerning translating a DramAddr to an InputAddr.
         */
        intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
        input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
                     (dram_addr & 0xfff);

        debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
                intlv_shift, (unsigned long)dram_addr,
                (unsigned long)input_addr);

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)

        input_addr =
            dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

        debugf2("SysAddr 0x%lx translates to InputAddr 0x%lx\n",
                (unsigned long)sys_addr, (unsigned long)input_addr);
/*
 * @input_addr is an InputAddr associated with the node represented by mci.
 * Translate @input_addr to a DramAddr and return the result.
 */
static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
        struct amd64_pvt *pvt;
        int node_id, intlv_shift;

        /*
         * The start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
         * shows how to translate a DramAddr to an InputAddr. Here we reverse
         * this procedure. When translating from a DramAddr to an InputAddr, the
         * bits used for node interleaving are discarded. Here we recover these
         * bits from the IntlvSel field of the DRAM Limit register (section
         * 3.4.4.2) for the node that input_addr is associated with.
         */
        node_id = pvt->mc_node_id;
        BUG_ON((node_id < 0) || (node_id > 7));

        intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));

        if (intlv_shift == 0) {
                debugf1(" InputAddr 0x%lx translates to DramAddr of "
                        "same value\n", (unsigned long)input_addr);

        bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
               (input_addr & 0xfff);

        intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
        dram_addr = bits + (intlv_sel << 12);
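        /*
         * Worked example (illustrative): with intlv_shift == 1 and
         * intlv_sel == 1, input_addr 0x5000 gives bits = (0x5000 << 1) +
         * 0x000 = 0xa000 and dram_addr = 0xa000 + (1 << 12) = 0xb000,
         * i.e. the node-select bit is re-inserted at bit 12.
         */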
        debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
                "(%d node interleave bits)\n", (unsigned long)input_addr,
                (unsigned long)dram_addr, intlv_shift);

/*
 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
 * @dram_addr to a SysAddr.
 */
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
        struct amd64_pvt *pvt = mci->pvt_info;
        u64 hole_base, hole_offset, hole_size, base, sys_addr;

        ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
                                       &hole_size);
                if ((dram_addr >= hole_base) &&
                    (dram_addr < (hole_base + hole_size))) {
                        sys_addr = dram_addr + hole_offset;

                        debugf1("using DHAR to translate DramAddr 0x%lx to "
                                "SysAddr 0x%lx\n", (unsigned long)dram_addr,
                                (unsigned long)sys_addr);

        base = get_dram_base(pvt, pvt->mc_node_id);
        sys_addr = dram_addr + base;

        /*
         * The sys_addr we have computed up to this point is a 40-bit value
         * because the k8 deals with 40-bit values. However, the value we are
         * supposed to return is a full 64-bit physical address. The AMD
         * x86-64 architecture specifies that the most significant implemented
         * address bit through bit 63 of a physical address must be either all
         * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
         * 64-bit value below. See section 3.4.2 of AMD publication 24592:
         * AMD x86-64 Architecture Programmer's Manual Volume 1, Application
         * Programming.
         */
        sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
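        /*
         * Example of the sign extension above (illustrative): if bit 39 is
         * set, e.g. sys_addr == 0x8000000000, then
         * (sys_addr & (1ull << 39)) - 1 == 0x7fffffffff and its complement
         * ORs in bits 63-39, giving 0xffffff8000000000; if bit 39 is clear
         * the OR term is 0 and sys_addr is unchanged.
         */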
        debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
                pvt->mc_node_id, (unsigned long)dram_addr,
                (unsigned long)sys_addr);

/*
 * @input_addr is an InputAddr associated with the node given by mci. Translate
 * @input_addr to a SysAddr.
 */
static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
                                         u64 input_addr)
        return dram_addr_to_sys_addr(mci,
                                     input_addr_to_dram_addr(mci, input_addr));

/*
 * Find the minimum and maximum InputAddr values that map to the given @csrow.
 * Pass back these values in *input_addr_min and *input_addr_max.
 */
static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
                              u64 *input_addr_min, u64 *input_addr_max)
        struct amd64_pvt *pvt;

        BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));

        base = base_from_dct_base(pvt, csrow);
        mask = mask_from_dct_mask(pvt, csrow);

        *input_addr_min = base & ~mask;
        *input_addr_max = base | mask | pvt->dcs_mask_notused;

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
                                                    u32 *page, u32 *offset)
        *page = (u32) (error_address >> PAGE_SHIFT);
        *offset = ((u32) error_address) & ~PAGE_MASK;
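/*
 * Example (illustrative, assuming 4K pages, i.e. PAGE_SHIFT == 12): an
 * error_address of 0x12345abc yields *page == 0x12345 and *offset == 0xabc.
 */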
/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * failure.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)

        csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

        if (csrow == -1)
                amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
                                  "address 0x%lx\n", (unsigned long)sys_addr);

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

static u16 extract_syndrome(struct err_regs *err)
        return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
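/*
 * Layout note (derived from the extraction above): the 16-bit syndrome is
 * assembled from two MCA registers - the low byte comes from NBSH bits
 * [22:15] and the high byte from NBSL bits [31:24], which the
 * ((err->nbsl >> 16) & 0xff00) term places at syndrome bits [15:8].
 */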
/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
        enum edac_type edac_cap = EDAC_FLAG_NONE;

        bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)

        if (pvt->dclr0 & BIT(bit))
                edac_cap = EDAC_FLAG_SECDED;

static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);

static void amd64_dump_dramcfg_low(u32 dclr, int chan)
        debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

        debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
                (dclr & BIT(16)) ? "un" : "",
                (dclr & BIT(19)) ? "yes" : "no");

        debugf1(" PAR/ERR parity: %s\n",
                (dclr & BIT(8)) ? "enabled" : "disabled");

        debugf1(" DCT 128bit mode width: %s\n",
                (dclr & BIT(11)) ? "128b" : "64b");

        debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
                (dclr & BIT(12)) ? "yes" : "no",
                (dclr & BIT(13)) ? "yes" : "no",
                (dclr & BIT(14)) ? "yes" : "no",
                (dclr & BIT(15)) ? "yes" : "no");
/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
        debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

        debugf1(" NB two channel DRAM capable: %s\n",
                (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");

        debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
                (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
                (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");

        amd64_dump_dramcfg_low(pvt->dclr0, 0);

        debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

        debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
                "offset: 0x%08x\n",
                pvt->dhar,
                dhar_base(pvt->dhar),
                (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
                                           : f10_dhar_offset(pvt->dhar));

        debugf1(" DramHoleValid: %s\n",
                (pvt->dhar & DHAR_VALID) ? "yes" : "no");

        amd64_debug_display_dimm_sizes(0, pvt);

        /* everything below this point is Fam10h and above */
        if (boot_cpu_data.x86 == 0xf)
                return;

        amd64_debug_display_dimm_sizes(1, pvt);

        amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));

        /* Only if NOT ganged does dclr1 have valid info */
        if (!dct_ganging_enabled(pvt))
                amd64_dump_dramcfg_low(pvt->dclr1, 1);

static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
        amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
        amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
/*
 * NOTE: CPU Revision Dependent code: Rev E and Rev F
 *
 * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
 * set the shift factor for the DCSB and DCSM values.
 *
 * ->dcs_mask_notused, RevE:
 *
 * To find the max InputAddr for the csrow, start with the base address and set
 * all bits that are "don't care" bits in the test at the start of section
 * 3.5.4 (p. 84).
 *
 * The "don't care" bits are all set bits in the mask and all bits in the gaps
 * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
 * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
 * gaps.
 *
 * ->dcs_mask_notused, RevF and later:
 *
 * To find the max InputAddr for the csrow, start with the base address and set
 * all bits that are "don't care" bits in the test at the start of NPT section
 *
 * The "don't care" bits are all set bits in the mask and all bits in the gaps
 * between bit ranges [36:27] and [21:13].
 *
 * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
 * which are all bits in the above-mentioned gaps.
 */
static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
        if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
                pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
                pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
                pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
                pvt->dcs_shift = REV_E_DCS_SHIFT;
        } else {
                pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
                pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
                pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
                pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)

        amd64_set_dct_base_and_mask(pvt);

        for (cs = 0; cs < pvt->cs_count; cs++) {
                reg = K8_DCSB0 + (cs * 4);

                if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsb0[cs]))
                        debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
                                cs, pvt->dcsb0[cs], reg);

                if (!dct_ganging_enabled(pvt)) {
                        reg = F10_DCSB1 + (cs * 4);

                        if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsb1[cs]))
                                debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
                                        cs, pvt->dcsb1[cs], reg);

        for (cs = 0; cs < pvt->num_dcsm; cs++) {
                reg = K8_DCSM0 + (cs * 4);

                if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsm0[cs]))
                        debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
                                cs, pvt->dcsm0[cs], reg);

                if (!dct_ganging_enabled(pvt)) {
                        reg = F10_DCSM1 + (cs * 4);

                        if (!amd64_read_dct_pci_cfg(pvt, reg, &pvt->dcsm1[cs]))
                                debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
                                        cs, pvt->dcsm1[cs], reg);
static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)

        if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
                if (pvt->dchr0 & DDR3_MODE)
                        type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
                else
                        type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
        } else {
                type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
        }

        amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);

/*
 * Read the DRAM Configuration Low register. It differs between CG, D & E revs
 * and the later RevF memory controllers (DDR vs DDR2).
 *
 * Return:
 *	number of memory channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int k8_early_channel_count(struct amd64_pvt *pvt)

        err = amd64_read_dct_pci_cfg(pvt, F10_DCLR_0, &pvt->dclr0);

        if (pvt->ext_model >= K8_REV_F)
                /* RevF (NPT) and later */
                flag = pvt->dclr0 & F10_WIDTH_128;
        else
                /* RevE and earlier */
                flag = pvt->dclr0 & REVE_WIDTH_128;

        return (flag) ? 2 : 1;
/* extract the ERROR ADDRESS for the K8 CPUs */
static u64 k8_get_error_address(struct mem_ctl_info *mci,
                                struct err_regs *info)
        return (((u64) (info->nbeah & 0xff)) << 32) +
                (info->nbeal & ~0x03);

static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
        u32 off = range << 3;

        amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
        amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

        if (boot_cpu_data.x86 == 0xf)
                return;

        if (!dram_rw(pvt, range))
                return;

        amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
        amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
                                    struct err_regs *err_info, u64 sys_addr)
        struct mem_ctl_info *src_mci;

        syndrome = extract_syndrome(err_info);

        /* CHIPKILL enabled */
        if (err_info->nbcfg & K8_NBCFG_CHIPKILL) {
                channel = get_channel_from_ecc_syndrome(mci, syndrome);
                if (channel < 0) {
                        /*
                         * Syndrome didn't map, so we don't know which of the
                         * 2 DIMMs is in error. So we need to ID 'both' of them
                         * as suspect.
                         */
                        amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
                                           "error reporting race\n", syndrome);
                        edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
                }
        } else {
                /*
                 * non-chipkill ecc mode
                 *
                 * The k8 documentation is unclear about how to determine the
                 * channel number when using non-chipkill memory. This method
                 * was obtained from email communication with someone at AMD.
                 * (Wish the email was placed in this comment - norsk)
                 */
                channel = ((sys_addr & BIT(3)) != 0);
        }

        /*
         * Find out which node the error address belongs to. This may be
         * different from the node that detected the error.
         */
        src_mci = find_mc_by_sys_addr(mci, sys_addr);
        if (!src_mci) {
                amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
                             (unsigned long)sys_addr);
                edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
                return;
        }

        /* Now map the sys_addr to a CSROW */
        csrow = sys_addr_to_csrow(src_mci, sys_addr);
        if (csrow < 0) {
                edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
        } else {
                error_address_to_page_and_offset(sys_addr, &page, &offset);

                edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
                                  channel, EDAC_MOD_STR);
        }
static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)

        if (pvt->ext_model >= K8_REV_F)
                dbam_map = ddr2_dbam;
        else if (pvt->ext_model >= K8_REV_D)
                dbam_map = ddr2_dbam_revD;
        else
                dbam_map = ddr2_dbam_revCG;

        return dbam_map[cs_mode];

/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f10_early_channel_count(struct amd64_pvt *pvt)
        int dbams[] = { DBAM0, DBAM1 };
        int i, j, channels = 0;

        /* If we are in 128 bit mode, then we are using 2 channels */
        if (pvt->dclr0 & F10_WIDTH_128) {

        /*
         * Need to check if in unganged mode: In such, there are 2 channels,
         * but they are not in 128 bit mode and thus the above 'dclr0' status
         * bit will be OFF.
         *
         * Need to check DCT0[0] and DCT1[0] to see if only one of them has
         * their CSEnable bit on. If so, then SINGLE DIMM case.
         */
        debugf0("Data width is not 128 bits - need more decoding\n");

        /*
         * Check DRAM Bank Address Mapping values for each DIMM to see if there
         * is more than just one DIMM present in unganged mode. Need to check
         * both controllers since DIMMs can be placed in either one.
         */
        for (i = 0; i < ARRAY_SIZE(dbams); i++) {
                if (amd64_read_dct_pci_cfg(pvt, dbams[i], &dbam))
                        continue;

                for (j = 0; j < 4; j++) {
                        if (DBAM_DIMM(j, dbam) > 0) {

        amd64_info("MCT channel count: %d\n", channels);
static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)

        if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
                dbam_map = ddr3_dbam;
        else
                dbam_map = ddr2_dbam;

        return dbam_map[cs_mode];

static u64 f10_get_error_address(struct mem_ctl_info *mci,
                                 struct err_regs *info)
        return (((u64) (info->nbeah & 0xffff)) << 32) +
                (info->nbeal & ~0x01);
static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)

        if (!amd64_read_dct_pci_cfg(pvt, F10_DCTL_SEL_LOW, &pvt->dct_sel_low)) {
                debugf0("F2x110 (DCTL Sel. Low): 0x%08x, High range addrs at: 0x%x\n",
                        pvt->dct_sel_low, dct_sel_baseaddr(pvt));

                debugf0(" DCT mode: %s, All DCTs on: %s\n",
                        (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
                        (dct_dram_enabled(pvt) ? "yes" : "no"));

                if (!dct_ganging_enabled(pvt))
                        debugf0(" Address range split per DCT: %s\n",
                                (dct_high_range_enabled(pvt) ? "yes" : "no"));

                debugf0(" DCT data interleave for ECC: %s, "
                        "DRAM cleared since last warm reset: %s\n",
                        (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
                        (dct_memory_cleared(pvt) ? "yes" : "no"));

                debugf0(" DCT channel interleave: %s, "
                        "DCT interleave bits selector: 0x%x\n",
                        (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
                        dct_sel_interleave_addr(pvt));
        }

        amd64_read_dct_pci_cfg(pvt, F10_DCTL_SEL_HIGH, &pvt->dct_sel_hi);
/*
 * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
                                 int hi_range_sel, u32 intlv_en)
        u32 cs, temp, dct_sel_high = (pvt->dct_sel_low >> 1) & 1;

        if (dct_ganging_enabled(pvt))
        else if (hi_range_sel)
        else if (dct_interleave_enabled(pvt)) {
                /*
                 * see F2x110[DctSelIntLvAddr] - channel interleave mode
                 */
                if (dct_sel_interleave_addr(pvt) == 0)
                        cs = sys_addr >> 6 & 1;
                else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
                        temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;

                        if (dct_sel_interleave_addr(pvt) & 1)
                                cs = (sys_addr >> 9 & 1) ^ temp;
                        else
                                cs = (sys_addr >> 6 & 1) ^ temp;
                } else if (intlv_en & 4)
                        cs = sys_addr >> 15 & 1;
                else if (intlv_en & 2)
                        cs = sys_addr >> 14 & 1;
                else if (intlv_en & 1)
                        cs = sys_addr >> 13 & 1;
                else
                        cs = sys_addr >> 12 & 1;
        } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
                cs = ~dct_sel_high & 1;
static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
        else if (intlv_en == 3)
        else if (intlv_en == 7)
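/*
 * For reference (matches intlv_shift_table[] earlier in this file):
 * intlv_en == 1 maps to a shift of 1 (2-node interleave), 3 to 2 (4-node),
 * 7 to 3 (8-node) and any other value to 0.
 */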
/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
                                           u32 dct_sel_base_addr,
                                           u64 dct_sel_base_off,
                                           u32 hole_valid, u32 hole_off,
                                           u64 dram_base)
        u64 chan_off;

        if (hi_range_sel) {
                if (!(dct_sel_base_addr & 0xFFFF0000) &&
                    hole_valid && (sys_addr >= 0x100000000ULL))
                        chan_off = hole_off << 16;
                else
                        chan_off = dct_sel_base_off;
        } else {
                if (hole_valid && (sys_addr >= 0x100000000ULL))
                        chan_off = hole_off << 16;
                else
                        chan_off = dram_base & 0xFFFFF8000000ULL;
        }

        return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
               (chan_off & 0x0000FFFFFF800000ULL);
/* Hack for the time being - Can we get this from BIOS?? */
#define CH0SPARE_RANK   0
#define CH1SPARE_RANK   1

/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static inline int f10_process_possible_spare(int csrow,
                                             u32 cs, struct amd64_pvt *pvt)
        u32 swap_done;
        u32 bad_dram_cs;

        /* Depending on channel, isolate respective SPARING info */
        if (cs) {
                swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
                bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
                if (swap_done && (csrow == bad_dram_cs))
                        csrow = CH1SPARE_RANK;
        } else {
                swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
                bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
                if (swap_done && (csrow == bad_dram_cs))
                        csrow = CH0SPARE_RANK;
        }
/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL: NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
        struct mem_ctl_info *mci;
        struct amd64_pvt *pvt;
        u32 cs_base, cs_mask;
        int cs_found = -EINVAL;

        pvt = mci->pvt_info;

        debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);

        for (csrow = 0; csrow < pvt->cs_count; csrow++) {

                cs_base = amd64_get_dct_base(pvt, cs, csrow);
                if (!(cs_base & K8_DCSB_CS_ENABLE))
                        continue;

                /*
                 * We have an ENABLED CSROW, Isolate just the MASK bits of the
                 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
                 * of the actual address.
                 */
                cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;

                /*
                 * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
                 * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
                 */
                cs_mask = amd64_get_dct_mask(pvt, cs, csrow);

                debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
                        csrow, cs_base, cs_mask);

                cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;

                debugf1(" Final CSMask=0x%x\n", cs_mask);
                debugf1(" (InputAddr & ~CSMask)=0x%x "
                        "(CSBase & ~CSMask)=0x%x\n",
                        (in_addr & ~cs_mask), (cs_base & ~cs_mask));

                if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
                        cs_found = f10_process_possible_spare(csrow, cs, pvt);

                        debugf1(" MATCH csrow=%d\n", cs_found);
                        break;
/* For a given @dram_range, check if @sys_addr falls within it. */
static int f10_match_to_this_node(struct amd64_pvt *pvt, int range,
                                  u64 sys_addr, int *nid, int *chan_sel)
        int cs_found = -EINVAL, high_range = 0;
        u32 intlv_shift, hole_off;
        u32 hole_valid, tmp, dct_sel_base, channel;
        u64 chan_addr, dct_sel_base_off;

        u8 node_id = dram_dst_node(pvt, range);
        u32 intlv_en = dram_intlv_en(pvt, range);
        u32 intlv_sel = dram_intlv_sel(pvt, range);
        u64 dram_base = get_dram_base(pvt, range);

        debugf1("(range %d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
                range, dram_base, sys_addr, get_dram_limit(pvt, range));

        /*
         * This assumes that one node's DHAR is the same as all the other
         * nodes' DHAR.
         */
        hole_off = (pvt->dhar & 0x0000FF80);
        hole_valid = (pvt->dhar & 0x1);
        dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;

        debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
                hole_off, hole_valid, intlv_sel);

        if (intlv_en &&
            (intlv_sel != ((sys_addr >> 12) & intlv_en)))
                return -EINVAL;

        dct_sel_base = dct_sel_baseaddr(pvt);

        /*
         * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
         * select between DCT0 and DCT1.
         */
        if (dct_high_range_enabled(pvt) &&
            !dct_ganging_enabled(pvt) &&
            ((sys_addr >> 27) >= (dct_sel_base >> 11)))
                high_range = 1;

        channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);

        chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
                                             dct_sel_base_off, hole_valid,
                                             hole_off, dram_base);

        intlv_shift = f10_map_intlv_en_to_shift(intlv_en);

        /* remove Node ID (in case of memory interleaving) */
        tmp = chan_addr & 0xFC0;

        chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
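        /*
         * Worked example (illustrative): undoing 2-node interleaving with
         * intlv_shift == 1: for chan_addr 0xb000, tmp = 0xb000 & 0xFC0 = 0
         * and chan_addr becomes ((0xb000 >> 1) & 0xFFFFFFFFF000ULL) | 0 =
         * 0x5000 - the inverse of the node-bit insertion shown in
         * input_addr_to_dram_addr() above.
         */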
        /* remove channel interleave and hash */
        if (dct_interleave_enabled(pvt) &&
            !dct_high_range_enabled(pvt) &&
            !dct_ganging_enabled(pvt)) {
                if (dct_sel_interleave_addr(pvt) != 1)
                        chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
                else {
                        tmp = chan_addr & 0xFC0;
                        chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
                                    | tmp;
                }
        }

        debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
                chan_addr, (u32)(chan_addr >> 8));

        cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);

        if (cs_found >= 0) {
                *nid = node_id;
                *chan_sel = channel;
static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
                                       int *node, int *chan_sel)
        int range, cs_found = -EINVAL;

        for (range = 0; range < DRAM_RANGES; range++) {

                if (!dram_rw(pvt, range))
                        continue;

                if ((get_dram_base(pvt, range) <= sys_addr) &&
                    (get_dram_limit(pvt, range) >= sys_addr)) {

                        cs_found = f10_match_to_this_node(pvt, range,
                                                          sys_addr, node, chan_sel);

/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware.
 */
static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
                                     struct err_regs *err_info,
                                     u64 sys_addr)
        struct amd64_pvt *pvt = mci->pvt_info;
        int nid, csrow, chan = 0;

        csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
        if (csrow < 0) {
                edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
                return;
        }

        error_address_to_page_and_offset(sys_addr, &page, &offset);

        syndrome = extract_syndrome(err_info);

        /*
         * We need the syndromes for channel detection only when we're
         * ganged. Otherwise @chan should already contain the channel at
         * this point.
         */
        if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
                chan = get_channel_from_ecc_syndrome(mci, syndrome);

        if (chan >= 0)
                edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
                                  EDAC_MOD_STR);
        else
                /*
                 * Channel unknown, report all channels on this CSROW as failed.
                 */
                for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
                        edac_mc_handle_ce(mci, page, offset, syndrome,
                                          csrow, chan, EDAC_MOD_STR);
/*
 * debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs
 */
static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
        int dimm, size0, size1, factor = 0;

        if (boot_cpu_data.x86 == 0xf) {
                if (pvt->dclr0 & F10_WIDTH_128)
                        factor = 1;

                /* K8 families < revF not supported yet */
                if (pvt->ext_model < K8_REV_F)
                        return;
        }

        dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
        dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0;

        debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);

        edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

        /* Dump memory sizes for DIMM and its CSROWs */
        for (dimm = 0; dimm < 4; dimm++) {

                size0 = 0;
                if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
                        size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));

                size1 = 0;
                if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
                        size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));

                amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
                           dimm * 2, size0 << factor,
                           dimm * 2 + 1, size1 << factor);
static struct amd64_family_type amd64_family_types[] = {
                .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
                .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
                        .early_channel_count = k8_early_channel_count,
                        .get_error_address = k8_get_error_address,
                        .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
                        .dbam_to_cs = k8_dbam_to_chip_select,
                        .read_dct_pci_cfg = k8_read_dct_pci_cfg,

                .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
                .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
                        .early_channel_count = f10_early_channel_count,
                        .get_error_address = f10_get_error_address,
                        .read_dram_ctl_register = f10_read_dram_ctl_register,
                        .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
                        .dbam_to_cs = f10_dbam_to_chip_select,
                        .read_dct_pci_cfg = f10_read_dct_pci_cfg,

                        .read_dct_pci_cfg = f15_read_dct_pci_cfg,
static struct pci_dev *pci_get_related_function(unsigned int vendor,
                                                unsigned int device,
                                                struct pci_dev *related)
        struct pci_dev *dev = NULL;

        dev = pci_get_device(vendor, device, dev);
        while (dev) {
                if ((dev->bus->number == related->bus->number) &&
                    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
                        break;
                dev = pci_get_device(vendor, device, dev);
        }

/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
static u16 x4_vectors[] = {
        0x2f57, 0x1afe, 0x66cc, 0xdd88,
        0x11eb, 0x3396, 0x7f4c, 0xeac8,
        0x0001, 0x0002, 0x0004, 0x0008,
        0x1013, 0x3032, 0x4044, 0x8088,
        0x106b, 0x30d6, 0x70fc, 0xe0a8,
        0x4857, 0xc4fe, 0x13cc, 0x3288,
        0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
        0x1f39, 0x251e, 0xbd6c, 0x6bd8,
        0x15c1, 0x2a42, 0x89ac, 0x4758,
        0x2b03, 0x1602, 0x4f0c, 0xca08,
        0x1f07, 0x3a0e, 0x6b04, 0xbd08,
        0x8ba7, 0x465e, 0x244c, 0x1cc8,
        0x2b87, 0x164e, 0x642c, 0xdc18,
        0x40b9, 0x80de, 0x1094, 0x20e8,
        0x27db, 0x1eb6, 0x9dac, 0x7b58,
        0x11c1, 0x2242, 0x84ac, 0x4c58,
        0x1be5, 0x2d7a, 0x5e34, 0xa718,
        0x4b39, 0x8d1e, 0x14b4, 0x28d8,
        0x4c97, 0xc87e, 0x11fc, 0x33a8,
        0x8e97, 0x497e, 0x2ffc, 0x1aa8,
        0x16b3, 0x3d62, 0x4f34, 0x8518,
        0x1e2f, 0x391a, 0x5cac, 0xf858,
        0x1d9f, 0x3b7a, 0x572c, 0xfe18,
        0x15f5, 0x2a5a, 0x5264, 0xa3b8,
        0x1dbb, 0x3b66, 0x715c, 0xe3f8,
        0x4397, 0xc27e, 0x17fc, 0x3ea8,
        0x1617, 0x3d3e, 0x6464, 0xb8b8,
        0x23ff, 0x12aa, 0xab6c, 0x56d8,
        0x2dfb, 0x1ba6, 0x913c, 0x7328,
        0x185d, 0x2ca6, 0x7914, 0x9e28,
        0x171b, 0x3e36, 0x7d7c, 0xebe8,
        0x4199, 0x82ee, 0x19f4, 0x2e58,
        0x4807, 0xc40e, 0x130c, 0x3208,
        0x1905, 0x2e0a, 0x5804, 0xac08,
        0x213f, 0x132a, 0xadfc, 0x5ba8,
        0x19a9, 0x2efe, 0xb5cc, 0x6f88,

static u16 x8_vectors[] = {
        0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
        0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
        0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
        0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
        0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
        0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
        0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
        0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
        0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
        0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
        0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
        0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
        0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
        0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
        0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
        0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
        0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
        0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
        0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
                           int v_dim)
        unsigned int i, err_sym;

        for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
                int v_idx = err_sym * v_dim;
                int v_end = (err_sym + 1) * v_dim;

                /* walk over all 16 bits of the syndrome */
                for (i = 1; i < (1U << 16); i <<= 1) {

                        /* if bit is set in that eigenvector... */
                        if (v_idx < v_end && vectors[v_idx] & i) {
                                u16 ev_comp = vectors[v_idx++];

                                /* ... and bit set in the modified syndrome, */

                /* can't get to zero, move to next symbol */

        debugf0("syndrome(%x) not found\n", syndrome);
static int map_err_sym_to_channel(int err_sym, int sym_size)
                return err_sym >> 4;

        /* imaginary bits not in a DIMM */
        WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
             err_sym);

                return err_sym >> 3;

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
        struct amd64_pvt *pvt = mci->pvt_info;

        if (pvt->syn_type == 8)
                err_sym = decode_syndrome(syndrome, x8_vectors,
                                          ARRAY_SIZE(x8_vectors),
                                          pvt->syn_type);
        else if (pvt->syn_type == 4)
                err_sym = decode_syndrome(syndrome, x4_vectors,
                                          ARRAY_SIZE(x4_vectors),
                                          pvt->syn_type);
        else {
                amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);

        return map_err_sym_to_channel(err_sym, pvt->syn_type);
/*
 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
 * ADDRESS and process.
 */
static void amd64_handle_ce(struct mem_ctl_info *mci,
                            struct err_regs *info)
        struct amd64_pvt *pvt = mci->pvt_info;

        /* Ensure that the Error Address is VALID */
        if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
                amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
                edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
                return;
        }

        sys_addr = pvt->ops->get_error_address(mci, info);

        amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);

        pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);

/* Handle any Un-correctable Errors (UEs) */
static void amd64_handle_ue(struct mem_ctl_info *mci,
                            struct err_regs *info)
        struct amd64_pvt *pvt = mci->pvt_info;
        struct mem_ctl_info *log_mci, *src_mci = NULL;

        log_mci = mci;

        if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
                amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
                edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
                return;
        }

        sys_addr = pvt->ops->get_error_address(mci, info);

        /*
         * Find out which node the error address belongs to. This may be
         * different from the node that detected the error.
         */
        src_mci = find_mc_by_sys_addr(mci, sys_addr);
        if (!src_mci) {
                amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
                             (unsigned long)sys_addr);
                edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
                return;
        }

        csrow = sys_addr_to_csrow(log_mci, sys_addr);
        if (csrow < 0) {
                amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
                             (unsigned long)sys_addr);
                edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
        } else {
                error_address_to_page_and_offset(sys_addr, &page, &offset);
                edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
        }
static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
                                            struct err_regs *info)
        u16 ec = EC(info->nbsl);
        u8 xec = XEC(info->nbsl, 0x1f);
        int ecc_type = (info->nbsh >> 13) & 0x3;

        /* Bail early out if this was an 'observed' error */
        if (PP(ec) == K8_NBSL_PP_OBS)
                return;

        /* Do only ECC errors */
        if (xec && xec != F10_NBSL_EXT_ERR_ECC)
                return;

        if (ecc_type == 2)
                amd64_handle_ce(mci, info);
        else if (ecc_type == 1)
                amd64_handle_ue(mci, info);

void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
        struct mem_ctl_info *mci = mcis[node_id];
        struct err_regs regs;

        regs.nbsl = (u32) m->status;
        regs.nbsh = (u32)(m->status >> 32);
        regs.nbeal = (u32) m->addr;
        regs.nbeah = (u32)(m->addr >> 32);

        __amd64_decode_bus_error(mci, &regs);

        /*
         * Check the UE bit of the NB status high register, if set generate some
         * logs. If NOT a GART error, then process the event as a NO-INFO event.
         * If it was a GART error, skip that process.
         *
         * FIXME: this should go somewhere else, if at all.
         */
        if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
                edac_mc_handle_ue_no_info(mci, "UE bit is set");
/*
 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
 */
static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
        /* Reserve the ADDRESS MAP Device */
        pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
        if (!pvt->F1) {
                amd64_err("error address map device not found: "
                          "vendor %x device 0x%x (broken BIOS?)\n",
                          PCI_VENDOR_ID_AMD, f1_id);
                return -ENODEV;
        }

        /* Reserve the MISC Device */
        pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
        if (!pvt->F3) {
                pci_dev_put(pvt->F1);

                amd64_err("error F3 device not found: "
                          "vendor %x device 0x%x (broken BIOS?)\n",
                          PCI_VENDOR_ID_AMD, f3_id);
                return -ENODEV;
        }

        debugf1("F1: %s\n", pci_name(pvt->F1));
        debugf1("F2: %s\n", pci_name(pvt->F2));
        debugf1("F3: %s\n", pci_name(pvt->F3));

static void free_mc_sibling_devs(struct amd64_pvt *pvt)
        pci_dev_put(pvt->F1);
        pci_dev_put(pvt->F3);
/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void read_mc_regs(struct amd64_pvt *pvt)

        /*
         * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
         * those are Read-As-Zero
         */
        rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
        debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);

        /* check first whether TOP_MEM2 is enabled */
        rdmsrl(MSR_K8_SYSCFG, msr_val);
        if (msr_val & (1U << 21)) {
                rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
                debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
        } else
                debugf0(" TOP_MEM2 disabled.\n");

        amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap);

        if (pvt->ops->read_dram_ctl_register)
                pvt->ops->read_dram_ctl_register(pvt);

        for (range = 0; range < DRAM_RANGES; range++) {

                /* read settings for this DRAM range */
                read_dram_base_limit_regs(pvt, range);

                rw = dram_rw(pvt, range);
                if (!rw)
                        continue;

                debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
                        range,
                        get_dram_base(pvt, range),
                        get_dram_limit(pvt, range));

                debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
                        dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
                        (rw & 0x1) ? "R" : "-",
                        (rw & 0x2) ? "W" : "-",
                        dram_intlv_sel(pvt, range),
                        dram_dst_node(pvt, range));
        }

        read_dct_base_mask(pvt);

        amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar);
        amd64_read_dbam_reg(pvt);

        amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

        amd64_read_dct_pci_cfg(pvt, F10_DCLR_0, &pvt->dclr0);
        amd64_read_dct_pci_cfg(pvt, F10_DCHR_0, &pvt->dchr0);

        if (!dct_ganging_enabled(pvt)) {
                amd64_read_dct_pci_cfg(pvt, F10_DCLR_1, &pvt->dclr1);
                amd64_read_dct_pci_cfg(pvt, F10_DCHR_1, &pvt->dchr1);
        }

        if (boot_cpu_data.x86 >= 0x10)
                amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);

        if (boot_cpu_data.x86 == 0x10 &&
            boot_cpu_data.x86_model > 7 &&
            /* F3x180[EccSymbolSize]=1 => x8 symbols */
            tmp & BIT(25))
                pvt->syn_type = 8;
        else
                pvt->syn_type = 4;

        dump_misc_regs(pvt);
/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr	ChipSelect Row Number (0..pvt->cs_count-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of 4 sets of 4 bits each, defined as:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from: 0 to 15
 * The meaning of the values depends on CPU revision and dual-channel state,
 * see relevant BKDG for more info.
 *
 * The memory controller provides for total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on CPU
 * revision.
 *
 * Output:
 *	The number of PAGE_SIZE pages on the specified CSROW number it
 *	amounts to.
 */
static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
        u32 cs_mode, nr_pages;

        /*
         * The math on this doesn't look right on the surface because x/2*4 can
         * be simplified to x*2 but this expression makes use of the fact that
         * it is integral math where 1/2=0. This intermediate value becomes the
         * number of bits to shift the DBAM register to extract the proper CSROW
         * field.
         */
        cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
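        /*
         * Worked example (illustrative): for csrow_nr == 5, (5 / 2) * 4 == 8
         * with integer division, so bits [11:8] of DBAM - the field covering
         * CSROWs 4 and 5 per the table above - are extracted as cs_mode.
         */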
        nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);

        /*
         * If dual channel then double the memory size of single channel.
         * Channel count is 1 or 2.
         */
        nr_pages <<= (pvt->channel_count - 1);

        debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
        debugf0(" nr_pages= %u channel-count = %d\n",
                nr_pages, pvt->channel_count);
/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int init_csrows(struct mem_ctl_info *mci)
        struct csrow_info *csrow;
        struct amd64_pvt *pvt = mci->pvt_info;
        u64 input_addr_min, input_addr_max, sys_addr;

        amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val);

        pvt->ctl_error_info.nbcfg = val;

        debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
                pvt->mc_node_id, val,
                !!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE));

        for (i = 0; i < pvt->cs_count; i++) {
                csrow = &mci->csrows[i];

                if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
                        debugf1("----CSROW %d EMPTY for node %d\n", i,
                                pvt->mc_node_id);
                        continue;
                }

                debugf1("----CSROW %d VALID for MC node %d\n",
                        i, pvt->mc_node_id);

                csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
                find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
                sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
                csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
                sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
                csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
                csrow->page_mask = ~mask_from_dct_mask(pvt, i);
                /* 8 bytes of resolution */

                csrow->mtype = amd64_determine_memory_type(pvt, i);

                debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
                debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
                        (unsigned long)input_addr_min,
                        (unsigned long)input_addr_max);
                debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
                        (unsigned long)sys_addr, csrow->page_mask);
                debugf1(" nr_pages: %u first_page: 0x%lx "
                        "last_page: 0x%lx\n",
                        (unsigned)csrow->nr_pages,
                        csrow->first_page, csrow->last_page);

                /*
                 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
                 */
                if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
                        csrow->edac_mode =
                            (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
                            EDAC_S4ECD4ED : EDAC_SECDED;
                else
                        csrow->edac_mode = EDAC_NONE;
2214 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
2218 for_each_online_cpu(cpu)
2219 if (amd_get_nb_id(cpu) == nid)
2220 cpumask_set_cpu(cpu, mask);

/* check MCG_CTL on all the cpus on this node */
static bool amd64_nb_mce_bank_enabled_on_node(int nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & K8_MSR_MCGCTL_NBE;

		debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			cpu, reg->q,
			(nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}
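
/*
 * Background note: K8_MSR_MCGCTL_NBE is bit 4 of the MCG_CTL MSR, i.e. the
 * enable bit of the northbridge MCE bank (the "[4]" in the hint printed by
 * ecc_enabled() below). The check above succeeds only if every online core
 * on the node has that bit set.
 */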

static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, nid);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {

		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & K8_MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= K8_MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off before
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~K8_MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}
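
/*
 * Idempotence sketch for the toggle above: if the BIOS already had NBE set,
 * the ON pass records nb_mce_enable = 1, so the later OFF pass leaves the
 * bit alone; only a bit this driver itself turned on is cleared again.
 */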

static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

	if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}

	amd64_read_pci_cfg(F3, K8_NBCTL, &value);

	/* turn on UECCEn and CECCEn bits */
	s->old_nbctl   = value & mask;
	s->nbctl_valid = true;

	value |= mask;
	amd64_write_pci_cfg(F3, K8_NBCTL, value);

	amd64_read_pci_cfg(F3, K8_NBCFG, &value);

	debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
		nid, value,
		!!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));

	if (!(value & K8_NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		s->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= K8_NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, K8_NBCFG, value);

		amd64_read_pci_cfg(F3, K8_NBCFG, &value);

		if (!(value & K8_NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
			ret = false;
		} else {
			amd64_info("Hardware accepted DRAM ECC Enable\n");
		}
	} else {
		s->flags.nb_ecc_prev = 1;
	}

	debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
		nid, value,
		!!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));

	return ret;
}

static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
					struct pci_dev *F3)
{
	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, K8_NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, K8_NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, K8_NBCFG, &value);
		value &= ~K8_NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, K8_NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}
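
/*
 * Save/restore protocol, in short: enable_ecc_error_reporting() snapshots the
 * BIOS state (old_nbctl, nb_ecc_prev) before forcing bits on, and
 * restore_ecc_error_reporting() undoes only what the driver itself changed,
 * so a machine whose BIOS had ECC enabled keeps it enabled after unload.
 */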

/*
 * EDAC requires that the BIOS have ECC enabled before
 * taking over the processing of ECC errors. A command line
 * option allows force-enabling hardware ECC later in
 * enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";

static bool ecc_enabled(struct pci_dev *F3, u8 nid)
{
	u32 value;
	u8 ecc_en = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(F3, K8_NBCFG, &value);

	ecc_en = !!(value & K8_NBCFG_ECC_ENABLE);
	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
	if (!nb_mce_en)
		amd64_notice("NB MCE bank disabled, set MSR "
			     "0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, nid);

	if (!ecc_en || !nb_mce_en) {
		amd64_notice("%s", ecc_msg);
		return false;
	}

	return true;
}

struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
					  ARRAY_SIZE(amd64_inj_attrs) +
					  1];

struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };

static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
	unsigned int i = 0, j = 0;

	for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
		sysfs_attrs[i] = amd64_dbg_attrs[i];

	if (boot_cpu_data.x86 >= 0x10)
		for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
			sysfs_attrs[i] = amd64_inj_attrs[j];

	sysfs_attrs[i] = terminator;

	mci->mc_driver_sysfs_attributes = sysfs_attrs;
}
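
/*
 * Layout note: sysfs_attrs is sized for both attribute groups plus one extra
 * slot because the EDAC core walks mc_driver_sysfs_attributes until it hits
 * an entry with a NULL .attr.name; 'terminator' occupies that final slot.
 * The injection attributes are only exported on family 0x10 and later.
 */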

static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & K8_NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & K8_NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= amd64_determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= pvt->ctl_name;
	mci->dev_name		= pci_name(pvt->F2);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}

/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
{
	u8 fam = boot_cpu_data.x86;
	struct amd64_family_type *fam_type = NULL;

	switch (fam) {
	case 0xf:
		fam_type		= &amd64_family_types[K8_CPUS];
		pvt->ops		= &amd64_family_types[K8_CPUS].ops;
		pvt->ctl_name		= fam_type->ctl_name;
		pvt->min_scrubrate	= K8_MIN_SCRUB_RATE_BITS;
		break;
	case 0x10:
		fam_type		= &amd64_family_types[F10_CPUS];
		pvt->ops		= &amd64_family_types[F10_CPUS].ops;
		pvt->ctl_name		= fam_type->ctl_name;
		pvt->min_scrubrate	= F10_MIN_SCRUB_RATE_BITS;
		break;
	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	pvt->ext_model = boot_cpu_data.x86_model >> 4;

	amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
		   (fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);

	return fam_type;
}

static int amd64_init_one_instance(struct pci_dev *F2)
{
	struct amd64_pvt *pvt = NULL;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	int err = 0, ret;
	u8 nid = get_node_id(F2);

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id	= nid;
	pvt->F2 = F2;

	ret = -EINVAL;
	fam_type = amd64_per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	ret = -ENODEV;
	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
	if (err)
		goto err_free;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;

	ret = -ENOMEM;
	mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, nid);
	if (!mci)
		goto err_siblings;

	mci->pvt_info = pvt;
	mci->dev = &pvt->F2->dev;

	setup_mci_misc_attrs(mci);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	set_mc_sysfs_attrs(mci);

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		debugf1("failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(amd64_decode_bus_error);

	mcis[nid] = mci;

	atomic_inc(&drv_instances);

	return 0;

err_add_mc:
	edac_mc_free(mci);

err_siblings:
	free_mc_sibling_devs(pvt);

err_free:
	kfree(pvt);

err_ret:
	return ret;
}

static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
					      const struct pci_device_id *mc_type)
{
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret = 0;

	ret = pci_enable_device(pdev);
	if (ret < 0) {
		debugf0("ret=%d\n", ret);
		return -EIO;
	}

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = -ENODEV;

		if (!ecc_enable_override)
			goto err_enable;

		amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = amd64_init_one_instance(pdev);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);
		restore_ecc_error_reporting(s, nid, F3);
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;

err_out:
	return ret;
}

static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;
	mcis[nid] = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}

/*
 * This table is part of the interface for loading drivers for PCI devices. The
 * PCI core identifies what devices are on a system during boot, and then
 * queries this table to see whether this driver handles a given device it
 * found.
 */
static const struct pci_device_id amd64_pci_table[] __devinitdata = {
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);

static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= amd64_probe_one_instance,
	.remove		= __devexit_p(amd64_remove_one_instance),
	.id_table	= amd64_pci_table,
};

static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (amd64_ctl_pci)
		return;

	mci = mcis[0];
	if (mci) {

		pvt = mci->pvt_info;
		amd64_ctl_pci =
			edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);

		if (!amd64_ctl_pci) {
			pr_warning("%s(): Unable to create PCI control\n",
				   __func__);

			pr_warning("%s(): PCI error report via EDAC not set\n",
				   __func__);
		}
	}
}

static int __init amd64_edac_init(void)
{
	int err = -ENODEV;

	edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");

	opstate_init();

	if (amd_cache_northbridges() < 0)
		goto err_ret;

	err = -ENOMEM;
	mcis	  = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!(mcis && ecc_stngs))
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		goto err_pci;

	err = -ENODEV;
	if (!atomic_read(&drv_instances))
		goto err_no_instances;

	setup_pci_device();
	return 0;

err_no_instances:
	pci_unregister_driver(&amd64_pci_driver);

err_pci:
	msrs_free(msrs);
	msrs = NULL;

err_free:
	kfree(mcis);
	mcis = NULL;

	kfree(ecc_stngs);
	ecc_stngs = NULL;

err_ret:
	return err;
}

static void __exit amd64_edac_exit(void)
{
	if (amd64_ctl_pci)
		edac_pci_release_generic_ctl(amd64_ctl_pci);

	pci_unregister_driver(&amd64_pci_driver);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	kfree(mcis);
	mcis = NULL;

	msrs_free(msrs);
	msrs = NULL;
}

module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
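
/*
 * Example (hypothetical session): select NMI-driven error reporting at load
 * time; 0 (polling) is the other documented value. The module name is the
 * usual one for this driver but may differ per build:
 *
 *	modprobe amd64_edac_mod edac_op_state=1
 */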