1 #include "amd64_edac.h"
2 #include <asm/amd_nb.h>
4 static struct edac_pci_ctl_info *amd64_ctl_pci;
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
10 * Set by command line parameter. If the BIOS has enabled ECC, this override is
11 * cleared to prevent this driver from re-enabling the hardware.
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
16 static struct msr __percpu *msrs;
19 * count successfully initialized driver instances for setup_pci_device()
21 static atomic_t drv_instances = ATOMIC_INIT(0);
23 /* Per-node driver instances */
24 static struct mem_ctl_info **mcis;
25 static struct ecc_settings **ecc_stngs;
28 * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and later.
31 static int ddr2_dbam_revCG[] = {
41 static int ddr2_dbam_revD[] = {
53 static int ddr2_dbam[] = { [0] = 128,
62 static int ddr3_dbam[] = { [0] = -1,
73 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
74 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
77 * FIXME: Produce a better mapping/linearisation.
82 u32 scrubval; /* bit pattern for scrub rate */
83 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
85 { 0x01, 1600000000UL},
107 { 0x00, 0UL}, /* scrubbing off */
110 static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
111 u32 *val, const char *func)
115 err = pci_read_config_dword(pdev, offset, val);
117 amd64_warn("%s: error reading F%dx%03x.\n",
118 func, PCI_FUNC(pdev->devfn), offset);
123 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
124 u32 val, const char *func)
128 err = pci_write_config_dword(pdev, offset, val);
130 amd64_warn("%s: error writing to F%dx%03x.\n",
131 func, PCI_FUNC(pdev->devfn), offset);
138 * Depending on the family, F2 DCT reads need special handling:
140 * K8: has a single DCT only
142 * F10h: each DCT has its own set of regs
146 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
149 static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
155 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
158 static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
161 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
164 static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
170 if (addr >= 0x140 && addr <= 0x1a0) {
175 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
178 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
180 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
184 * Memory scrubber control interface. For K8, memory scrubbing is handled by
185 * hardware and can involve L2 cache, dcache as well as the main memory. With
186 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
189 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
190 * (dram) to cache lines. This is nasty, so we use bandwidth in bytes/sec
191 * for the setting instead.
193 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
194 * other archs, we might not have access to the caches directly.
198 * Scan the scrub rate mapping table for a close or matching bandwidth value.
199 * If the requested rate is too big, use the last maximum value found.
201 static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
207 * map the configured rate (new_bw) to a value specific to the AMD64
208 * memory controller and apply to register. Search for the first
209 * bandwidth entry that does not exceed the requested setting and
210 * program that. If at last entry, turn off DRAM scrubbing.
212 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
214 * skip scrub rates which aren't recommended
215 * (see F10 BKDG, F3x58)
217 if (scrubrates[i].scrubval < min_rate)
220 if (scrubrates[i].bandwidth <= new_bw)
224 * if no suitable bandwidth is found, turn off DRAM scrubbing
225 * entirely by falling back to the last element in the
230 scrubval = scrubrates[i].scrubval;
232 pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
235 return scrubrates[i].bandwidth;
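/*
 * Illustrative example using the table entries shown above (assumed
 * request values, not normative, and assuming min_rate permits them):
 * a request of new_bw = 1700000000 bytes/sec stops at the first entry
 * whose bandwidth is <= new_bw, i.e. scrubval 0x01 (1.6 GB/s). A
 * request smaller than every non-zero rate walks all the way down to
 * the final { 0x00, 0UL } entry and disables scrubbing.
 */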
240 static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
242 struct amd64_pvt *pvt = mci->pvt_info;
244 return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
247 static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
249 struct amd64_pvt *pvt = mci->pvt_info;
251 int i, retval = -EINVAL;
253 amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);
255 scrubval = scrubval & 0x001F;
257 amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval);
259 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
260 if (scrubrates[i].scrubval == scrubval) {
261 retval = scrubrates[i].bandwidth;
269 * returns true if the SysAddr given by sys_addr matches the
270 * DRAM base/limit associated with node_id
272 static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, int nid)
276 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
277 * all ones if the most significant implemented address bit is 1.
278 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
279 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
280 * Application Programming.
282 addr = sys_addr & 0x000000ffffffffffull;
284 return ((addr >= get_dram_base(pvt, nid)) &&
285 (addr <= get_dram_limit(pvt, nid)));
289 * Attempt to map a SysAddr to a node. On success, return a pointer to the
290 * mem_ctl_info structure for the node that the SysAddr maps to.
292 * On failure, return NULL.
294 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
297 struct amd64_pvt *pvt;
302 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
303 * 3.4.4.2) registers to map the SysAddr to a node ID.
308 * The value of this field should be the same for all DRAM Base
309 * registers. Therefore we arbitrarily choose to read it from the
310 * register for node 0.
312 intlv_en = dram_intlv_en(pvt, 0);
315 for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
316 if (amd64_base_limit_match(pvt, sys_addr, node_id))
322 if (unlikely((intlv_en != 0x01) &&
323 (intlv_en != 0x03) &&
324 (intlv_en != 0x07))) {
325 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
329 bits = (((u32) sys_addr) >> 12) & intlv_en;
331 for (node_id = 0; ; ) {
332 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
333 break; /* intlv_sel field matches */
335 if (++node_id >= DRAM_RANGES)
339 /* sanity test for sys_addr */
340 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
341 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
342 "range for node %d with node interleaving enabled.\n",
343 __func__, sys_addr, node_id);
348 return edac_mc_find(node_id);
351 debugf2("sys_addr 0x%lx doesn't match any node\n",
352 (unsigned long)sys_addr);
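/*
 * Illustrative example (assumed values): with intlv_en = 0x03 (4-node
 * interleave), bits = SysAddr[13:12]. A SysAddr of 0x2000 yields
 * bits = 2, so the node whose DRAM Limit[IntlvSel] field reads 2
 * claims the address.
 */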
358 * compute the CS base address of the @csrow on the DRAM controller @dct.
359 * For details see F2x[5C:40] in the processor's BKDG
361 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
362 u64 *base, u64 *mask)
364 u64 csbase, csmask, base_bits, mask_bits;
367 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
368 csbase = pvt->csels[dct].csbases[csrow];
369 csmask = pvt->csels[dct].csmasks[csrow];
370 base_bits = GENMASK(21, 31) | GENMASK(9, 15);
371 mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
374 csbase = pvt->csels[dct].csbases[csrow];
375 csmask = pvt->csels[dct].csmasks[csrow >> 1];
378 if (boot_cpu_data.x86 == 0x15)
379 base_bits = mask_bits = GENMASK(19, 30) | GENMASK(5, 13);
381 base_bits = mask_bits = GENMASK(19, 28) | GENMASK(5, 13);
384 *base = (csbase & base_bits) << addr_shift;
387 /* poke holes for the csmask */
388 *mask &= ~(mask_bits << addr_shift);
390 *mask |= (csmask & mask_bits) << addr_shift;
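/*
 * The (base, mask) pair produced here behaves like a range match: an
 * InputAddr belongs to this chip select when
 * (input_addr & mask) == (base & mask), which is the rule used by
 * input_addr_to_csrow() below. Note that on rev F and later two
 * consecutive chip selects share one mask (csmasks[csrow >> 1]):
 * csrows 0/1 use csmasks[0], csrows 2/3 use csmasks[1], and so on.
 */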
393 #define for_each_chip_select(i, dct, pvt) \
394 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
396 #define for_each_chip_select_mask(i, dct, pvt) \
397 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
400 * @input_addr is an InputAddr associated with the node given by mci. Return the
401 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
403 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
405 struct amd64_pvt *pvt;
411 for_each_chip_select(csrow, 0, pvt) {
412 if (!csrow_enabled(csrow, 0, pvt))
415 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
419 if ((input_addr & mask) == (base & mask)) {
420 debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
421 (unsigned long)input_addr, csrow,
427 debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
428 (unsigned long)input_addr, pvt->mc_node_id);
434 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
435 * for the node represented by mci. Info is passed back in *hole_base,
436 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
437 * info is invalid. Info may be invalid for either of the following reasons:
439 * - The revision of the node is not E or greater. In this case, the DRAM Hole
440 * Address Register does not exist.
442 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
443 * indicating that its contents are not valid.
445 * The values passed back in *hole_base, *hole_offset, and *hole_size are
446 * complete 32-bit values despite the fact that the bitfields in the DHAR
447 * only represent bits 31-24 of the base and offset values.
449 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
450 u64 *hole_offset, u64 *hole_size)
452 struct amd64_pvt *pvt = mci->pvt_info;
455 /* only revE and later have the DRAM Hole Address Register */
456 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
457 debugf1(" revision %d for node %d does not support DHAR\n",
458 pvt->ext_model, pvt->mc_node_id);
462 /* valid for Fam10h and above */
463 if (boot_cpu_data.x86 >= 0x10 &&
464 (pvt->dhar & DRAM_MEM_HOIST_VALID) == 0) {
465 debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
469 if ((pvt->dhar & DHAR_VALID) == 0) {
470 debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
475 /* This node has Memory Hoisting */
477 /* +------------------+--------------------+--------------------+-----
478 * | memory | DRAM hole | relocated |
479 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
481 * | | | [0x100000000, |
482 * | | | (0x100000000+ |
483 * | | | (0xffffffff-x))] |
484 * +------------------+--------------------+--------------------+-----
486 * Above is a diagram of physical memory showing the DRAM hole and the
487 * relocated addresses from the DRAM hole. As shown, the DRAM hole
488 * starts at address x (the base address) and extends through address
489 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
490 * addresses in the hole so that they start at 0x100000000.
493 base = dhar_base(pvt);
496 *hole_size = (0x1ull << 32) - base;
498 if (boot_cpu_data.x86 > 0xf)
499 *hole_offset = f10_dhar_offset(pvt);
501 *hole_offset = k8_dhar_offset(pvt);
503 debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
504 pvt->mc_node_id, (unsigned long)*hole_base,
505 (unsigned long)*hole_offset, (unsigned long)*hole_size);
509 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
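/*
 * Worked example (assumed values, not from the BKDG): with a hole base
 * of x = 0xc0000000, *hole_size = 0x100000000 - 0xc0000000 =
 * 0x40000000 (1 GB). The DRAM behind [0xc0000000, 0xffffffff] is then
 * reached through SysAddrs [0x100000000, 0x13fffffff], and
 * *hole_offset is the amount such a hoisted SysAddr must be reduced
 * by to obtain the corresponding DramAddr.
 */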
512 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
513 * assumed that sys_addr maps to the node given by mci.
515 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
516 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
517 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
518 * then it is also involved in translating a SysAddr to a DramAddr. Sections
519 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
520 * These parts of the documentation are unclear. I interpret them as follows:
522 * When node n receives a SysAddr, it processes the SysAddr as follows:
524 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
525 * Limit registers for node n. If the SysAddr is not within the range
526 * specified by the base and limit values, then node n ignores the SysAddr
527 * (since it does not map to node n). Otherwise continue to step 2 below.
529 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
530 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
531 * the range of relocated addresses (starting at 0x100000000) from the DRAM
532 * hole. If not, skip to step 3 below. Else get the value of the
533 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
534 * offset defined by this value from the SysAddr.
536 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
537 * Base register for node n. To obtain the DramAddr, subtract the base
538 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
540 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
542 struct amd64_pvt *pvt = mci->pvt_info;
543 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
546 dram_base = get_dram_base(pvt, pvt->mc_node_id);
548 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
551 if ((sys_addr >= (1ull << 32)) &&
552 (sys_addr < ((1ull << 32) + hole_size))) {
553 /* use DHAR to translate SysAddr to DramAddr */
554 dram_addr = sys_addr - hole_offset;
556 debugf2("using DHAR to translate SysAddr 0x%lx to "
558 (unsigned long)sys_addr,
559 (unsigned long)dram_addr);
566 * Translate the SysAddr to a DramAddr as shown near the start of
567 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
568 * only deals with 40-bit values. Therefore we discard bits 63-40 of
569 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
570 * discard are all 1s. Otherwise the bits we discard are all 0s. See
571 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
572 * Programmer's Manual Volume 1 Application Programming.
574 dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
576 debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
577 "DramAddr 0x%lx\n", (unsigned long)sys_addr,
578 (unsigned long)dram_addr);
583 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
584 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
585 * for node interleaving.
587 static int num_node_interleave_bits(unsigned intlv_en)
589 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
592 BUG_ON(intlv_en > 7);
593 n = intlv_shift_table[intlv_en];
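/*
 * The table above encodes the only IntlvEn values the hardware is
 * expected to program: 0x01 -> 1 bit (2-node), 0x03 -> 2 bits
 * (4-node), 0x07 -> 3 bits (8-node); everything else means no node
 * interleaving.
 */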
597 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
598 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
600 struct amd64_pvt *pvt;
607 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
608 * concerning translating a DramAddr to an InputAddr.
610 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
611 input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
614 debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
615 intlv_shift, (unsigned long)dram_addr,
616 (unsigned long)input_addr);
622 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
623 * assumed that @sys_addr maps to the node given by mci.
625 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
630 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
632 debugf2("SysAddr 0x%lx translates to InputAddr 0x%lx\n",
633 (unsigned long)sys_addr, (unsigned long)input_addr);
640 * @input_addr is an InputAddr associated with the node represented by mci.
641 * Translate @input_addr to a DramAddr and return the result.
643 static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
645 struct amd64_pvt *pvt;
646 int node_id, intlv_shift;
651 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
652 * shows how to translate a DramAddr to an InputAddr. Here we reverse
653 * this procedure. When translating from a DramAddr to an InputAddr, the
654 * bits used for node interleaving are discarded. Here we recover these
655 * bits from the IntlvSel field of the DRAM Limit register (section
656 * 3.4.4.2) for the node that input_addr is associated with.
659 node_id = pvt->mc_node_id;
660 BUG_ON((node_id < 0) || (node_id > 7));
662 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
664 if (intlv_shift == 0) {
665 debugf1(" InputAddr 0x%lx translates to DramAddr of "
666 "same value\n", (unsigned long)input_addr);
671 bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
672 (input_addr & 0xfff);
674 intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
675 dram_addr = bits + (intlv_sel << 12);
677 debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
678 "(%d node interleave bits)\n", (unsigned long)input_addr,
679 (unsigned long)dram_addr, intlv_shift);
685 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
686 * @dram_addr to a SysAddr.
688 static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
690 struct amd64_pvt *pvt = mci->pvt_info;
691 u64 hole_base, hole_offset, hole_size, base, sys_addr;
694 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
697 if ((dram_addr >= hole_base) &&
698 (dram_addr < (hole_base + hole_size))) {
699 sys_addr = dram_addr + hole_offset;
701 debugf1("using DHAR to translate DramAddr 0x%lx to "
702 "SysAddr 0x%lx\n", (unsigned long)dram_addr,
703 (unsigned long)sys_addr);
709 base = get_dram_base(pvt, pvt->mc_node_id);
710 sys_addr = dram_addr + base;
713 * The sys_addr we have computed up to this point is a 40-bit value
714 * because the k8 deals with 40-bit values. However, the value we are
715 * supposed to return is a full 64-bit physical address. The AMD
716 * x86-64 architecture specifies that the most significant implemented
717 * address bit through bit 63 of a physical address must be either all
718 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
719 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
720 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
723 sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
725 debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
726 pvt->mc_node_id, (unsigned long)dram_addr,
727 (unsigned long)sys_addr);
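/*
 * Sign-extension example for the statement above: if bit 39 of
 * sys_addr is set, (sys_addr & (1ull << 39)) - 1 = 0x0000007fffffffff,
 * whose complement 0xffffff8000000000 ORs 1s into bits 63-39. If bit
 * 39 is clear, the subtraction yields all 1s, the complement is 0 and
 * sys_addr is left unchanged.
 */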
733 * @input_addr is an InputAddr associated with the node given by mci. Translate
734 * @input_addr to a SysAddr.
736 static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
739 return dram_addr_to_sys_addr(mci,
740 input_addr_to_dram_addr(mci, input_addr));
744 * Find the minimum and maximum InputAddr values that map to the given @csrow.
745 * Pass back these values in *input_addr_min and *input_addr_max.
747 static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
748 u64 *input_addr_min, u64 *input_addr_max)
750 struct amd64_pvt *pvt;
754 BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
756 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
758 *input_addr_min = base & ~mask;
759 *input_addr_max = base | mask;
762 /* Map the Error address to a PAGE and PAGE OFFSET. */
763 static inline void error_address_to_page_and_offset(u64 error_address,
764 u32 *page, u32 *offset)
766 *page = (u32) (error_address >> PAGE_SHIFT);
767 *offset = ((u32) error_address) & ~PAGE_MASK;
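/*
 * Example, assuming 4K pages (PAGE_SHIFT = 12): an error address of
 * 0x12345678 maps to page 0x12345 and offset 0x678.
 */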
771 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
772 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
773 * of a node that detected an ECC memory error. mci represents the node that
774 * the error address maps to (possibly different from the node that detected
775 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
778 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
782 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
785 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
786 "address 0x%lx\n", (unsigned long)sys_addr);
790 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
792 static u16 extract_syndrome(struct err_regs *err)
794 return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
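/*
 * The 16-bit syndrome is assembled from two MCA registers: bits 7-0
 * come from NBSH[22:15] and bits 15-8 from NBSL[31:24].
 */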
798 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
801 static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
804 enum edac_type edac_cap = EDAC_FLAG_NONE;
806 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
810 if (pvt->dclr0 & BIT(bit))
811 edac_cap = EDAC_FLAG_SECDED;
817 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
819 static void amd64_dump_dramcfg_low(u32 dclr, int chan)
821 debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
823 debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
824 (dclr & BIT(16)) ? "un" : "",
825 (dclr & BIT(19)) ? "yes" : "no");
827 debugf1(" PAR/ERR parity: %s\n",
828 (dclr & BIT(8)) ? "enabled" : "disabled");
830 debugf1(" DCT 128bit mode width: %s\n",
831 (dclr & BIT(11)) ? "128b" : "64b");
833 debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
834 (dclr & BIT(12)) ? "yes" : "no",
835 (dclr & BIT(13)) ? "yes" : "no",
836 (dclr & BIT(14)) ? "yes" : "no",
837 (dclr & BIT(15)) ? "yes" : "no");
840 /* Display and decode various NB registers for debug purposes. */
841 static void dump_misc_regs(struct amd64_pvt *pvt)
843 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
845 debugf1(" NB two channel DRAM capable: %s\n",
846 (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
848 debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
849 (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
850 (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
852 amd64_dump_dramcfg_low(pvt->dclr0, 0);
854 debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
856 debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
858 pvt->dhar, dhar_base(pvt),
859 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
860 : f10_dhar_offset(pvt));
862 debugf1(" DramHoleValid: %s\n",
863 (pvt->dhar & DHAR_VALID) ? "yes" : "no");
865 amd64_debug_display_dimm_sizes(0, pvt);
867 /* everything below this point is Fam10h and above */
868 if (boot_cpu_data.x86 == 0xf)
871 amd64_debug_display_dimm_sizes(1, pvt);
873 amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
875 /* Only if NOT ganged does dclr1 have valid info */
876 if (!dct_ganging_enabled(pvt))
877 amd64_dump_dramcfg_low(pvt->dclr1, 1);
880 static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
882 amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
883 amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
887 * see BKDG, F2x[1,0][5C:40], F2x[1,0][6C:60]
889 static void prep_chip_selects(struct amd64_pvt *pvt)
891 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
892 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
893 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
895 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
896 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
901 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
903 static void read_dct_base_mask(struct amd64_pvt *pvt)
907 prep_chip_selects(pvt);
909 for_each_chip_select(cs, 0, pvt) {
910 u32 reg0 = DCSB0 + (cs * 4);
911 u32 reg1 = DCSB1 + (cs * 4);
912 u32 *base0 = &pvt->csels[0].csbases[cs];
913 u32 *base1 = &pvt->csels[1].csbases[cs];
915 if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
916 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
919 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
922 if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
923 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
927 for_each_chip_select_mask(cs, 0, pvt) {
928 u32 reg0 = DCSM0 + (cs * 4);
929 u32 reg1 = DCSM1 + (cs * 4);
930 u32 *mask0 = &pvt->csels[0].csmasks[cs];
931 u32 *mask1 = &pvt->csels[1].csmasks[cs];
933 if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
934 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
937 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
940 if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
941 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
946 static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
950 if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
951 if (pvt->dchr0 & DDR3_MODE)
952 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
954 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
956 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
959 amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);
965 * Read the DRAM Configuration Low register. It differs between CG, D & E revs
966 * and the later RevF memory controllers (DDR vs DDR2)
969 * number of memory channels in operation
971 * contents of the DCL0_LOW register
973 static int k8_early_channel_count(struct amd64_pvt *pvt)
977 err = amd64_read_dct_pci_cfg(pvt, F10_DCLR_0, &pvt->dclr0);
981 if (pvt->ext_model >= K8_REV_F)
982 /* RevF (NPT) and later */
983 flag = pvt->dclr0 & F10_WIDTH_128;
985 /* RevE and earlier */
986 flag = pvt->dclr0 & REVE_WIDTH_128;
991 return (flag) ? 2 : 1;
994 /* extract the ERROR ADDRESS for the K8 CPUs */
995 static u64 k8_get_error_address(struct mem_ctl_info *mci,
996 struct err_regs *info)
998 return (((u64) (info->nbeah & 0xff)) << 32) +
999 (info->nbeal & ~0x03);
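/*
 * Assembles the 40-bit K8 error address: NBEAH[7:0] supplies bits
 * 39-32 and NBEAL bits 31-0, with the low two bits masked off.
 */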
1002 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1004 u32 off = range << 3;
1006 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
1007 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1009 if (boot_cpu_data.x86 == 0xf)
1012 if (!dram_rw(pvt, range))
1015 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
1016 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1019 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1020 struct err_regs *err_info, u64 sys_addr)
1022 struct mem_ctl_info *src_mci;
1027 syndrome = extract_syndrome(err_info);
1029 /* CHIPKILL enabled */
1030 if (err_info->nbcfg & K8_NBCFG_CHIPKILL) {
1031 channel = get_channel_from_ecc_syndrome(mci, syndrome);
1034 * Syndrome didn't map, so we don't know which of the
1035 * 2 DIMMs is in error. So we need to ID 'both' of them
1038 amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
1039 "error reporting race\n", syndrome);
1040 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1045 * non-chipkill ecc mode
1047 * The k8 documentation is unclear about how to determine the
1048 * channel number when using non-chipkill memory. This method
1049 * was obtained from email communication with someone at AMD.
1050 * (Wish the email was placed in this comment - norsk)
1052 channel = ((sys_addr & BIT(3)) != 0);
1056 * Find out which node the error address belongs to. This may be
1057 * different from the node that detected the error.
1059 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1061 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1062 (unsigned long)sys_addr);
1063 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1067 /* Now map the sys_addr to a CSROW */
1068 csrow = sys_addr_to_csrow(src_mci, sys_addr);
1070 edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
1072 error_address_to_page_and_offset(sys_addr, &page, &offset);
1074 edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
1075 channel, EDAC_MOD_STR);
1079 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
1083 if (pvt->ext_model >= K8_REV_F)
1084 dbam_map = ddr2_dbam;
1085 else if (pvt->ext_model >= K8_REV_D)
1086 dbam_map = ddr2_dbam_revD;
1088 dbam_map = ddr2_dbam_revCG;
1090 return dbam_map[cs_mode];
1094 * Get the number of DCT channels in use.
1097 * number of Memory Channels in operation
1099 * contents of the DCL0_LOW register
1101 static int f10_early_channel_count(struct amd64_pvt *pvt)
1103 int dbams[] = { DBAM0, DBAM1 };
1104 int i, j, channels = 0;
1107 /* If we are in 128 bit mode, then we are using 2 channels */
1108 if (pvt->dclr0 & F10_WIDTH_128) {
1114 * Need to check if in unganged mode: in that case there are 2 channels,
1115 * but they are not in 128 bit mode and thus the above 'dclr0' status
1118 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1119 * their CSEnable bit on. If so, then SINGLE DIMM case.
1121 debugf0("Data width is not 128 bits - need more decoding\n");
1124 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1125 * is more than just one DIMM present in unganged mode. Need to check
1126 * both controllers since DIMMs can be placed in either one.
1128 for (i = 0; i < ARRAY_SIZE(dbams); i++) {
1129 if (amd64_read_dct_pci_cfg(pvt, dbams[i], &dbam))
1132 for (j = 0; j < 4; j++) {
1133 if (DBAM_DIMM(j, dbam) > 0) {
1143 amd64_info("MCT channel count: %d\n", channels);
1152 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
1156 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1157 dbam_map = ddr3_dbam;
1159 dbam_map = ddr2_dbam;
1161 return dbam_map[cs_mode];
1164 static u64 f10_get_error_address(struct mem_ctl_info *mci,
1165 struct err_regs *info)
1167 return (((u64) (info->nbeah & 0xffff)) << 32) +
1168 (info->nbeal & ~0x01);
1171 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
1174 if (!amd64_read_dct_pci_cfg(pvt, F10_DCTL_SEL_LOW, &pvt->dct_sel_low)) {
1175 debugf0("F2x110 (DCTL Sel. Low): 0x%08x, High range addrs at: 0x%x\n",
1176 pvt->dct_sel_low, dct_sel_baseaddr(pvt));
1178 debugf0(" DCT mode: %s, All DCTs on: %s\n",
1179 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
1180 (dct_dram_enabled(pvt) ? "yes" : "no"));
1182 if (!dct_ganging_enabled(pvt))
1183 debugf0(" Address range split per DCT: %s\n",
1184 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1186 debugf0(" DCT data interleave for ECC: %s, "
1187 "DRAM cleared since last warm reset: %s\n",
1188 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1189 (dct_memory_cleared(pvt) ? "yes" : "no"));
1191 debugf0(" DCT channel interleave: %s, "
1192 "DCT interleave bits selector: 0x%x\n",
1193 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1194 dct_sel_interleave_addr(pvt));
1197 amd64_read_dct_pci_cfg(pvt, F10_DCTL_SEL_HIGH, &pvt->dct_sel_hi);
1201 * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1202 * Interleaving Modes.
1204 static u8 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1205 int hi_range_sel, u32 intlv_en)
1207 u32 temp, dct_sel_high = (pvt->dct_sel_low >> 1) & 1;
1210 if (dct_ganging_enabled(pvt))
1212 else if (hi_range_sel)
1214 else if (dct_interleave_enabled(pvt)) {
1216 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1218 if (dct_sel_interleave_addr(pvt) == 0)
1219 cs = sys_addr >> 6 & 1;
1220 else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
1221 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1223 if (dct_sel_interleave_addr(pvt) & 1)
1224 cs = (sys_addr >> 9 & 1) ^ temp;
1226 cs = (sys_addr >> 6 & 1) ^ temp;
1227 } else if (intlv_en & 4)
1228 cs = sys_addr >> 15 & 1;
1229 else if (intlv_en & 2)
1230 cs = sys_addr >> 14 & 1;
1231 else if (intlv_en & 1)
1232 cs = sys_addr >> 13 & 1;
1234 cs = sys_addr >> 12 & 1;
1235 } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
1236 cs = ~dct_sel_high & 1;
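/*
 * Illustrative reading of the interleave cases above: with
 * DctSelIntLvAddr = 0 the channel is simply SysAddr[6]; with
 * DctSelIntLvAddr = 1 it is SysAddr[12], pushed up to bit 13, 14 or
 * 15 when 2-, 4- or 8-node interleaving is enabled so that the
 * node-interleave bits are skipped.
 */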
1243 static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
1247 else if (intlv_en == 3)
1249 else if (intlv_en == 7)
1255 /* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
1256 static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
1257 u32 dct_sel_base_addr,
1258 u64 dct_sel_base_off,
1259 u32 hole_valid, u64 hole_off,
1265 if (!(dct_sel_base_addr & 0xFFFF0000) &&
1266 hole_valid && (sys_addr >= 0x100000000ULL))
1267 chan_off = hole_off;
1269 chan_off = dct_sel_base_off;
1271 if (hole_valid && (sys_addr >= 0x100000000ULL))
1272 chan_off = hole_off;
1274 chan_off = dram_base & 0xFFFFF8000000ULL;
1277 return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
1278 (chan_off & 0x0000FFFFFF800000ULL);
1281 /* Hack for the time being - Can we get this from BIOS?? */
1282 #define CH0SPARE_RANK 0
1283 #define CH1SPARE_RANK 1
1286 * Check whether the csrow passed in is marked as SPARED; if so, return the new
1289 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1294 /* Depending on channel, isolate respective SPARING info */
1296 swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
1297 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
1298 if (swap_done && (csrow == bad_dram_cs))
1299 csrow = CH1SPARE_RANK;
1301 swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
1302 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
1303 if (swap_done && (csrow == bad_dram_cs))
1304 csrow = CH0SPARE_RANK;
1310 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1311 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1314 * -EINVAL: NOT FOUND
1315 * 0..csrow = Chip-Select Row
1317 static int f10_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
1319 struct mem_ctl_info *mci;
1320 struct amd64_pvt *pvt;
1321 u64 cs_base, cs_mask;
1322 int cs_found = -EINVAL;
1329 pvt = mci->pvt_info;
1331 debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1333 for_each_chip_select(csrow, dct, pvt) {
1334 if (!csrow_enabled(csrow, dct, pvt))
1337 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1339 debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1340 csrow, cs_base, cs_mask);
1344 debugf1(" (InputAddr & ~CSMask)=0x%llx "
1345 "(CSBase & ~CSMask)=0x%llx\n",
1346 (in_addr & cs_mask), (cs_base & cs_mask));
1348 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1349 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1351 debugf1(" MATCH csrow=%d\n", cs_found);
1358 /* For a given @dram_range, check if @sys_addr falls within it. */
1359 static int f10_match_to_this_node(struct amd64_pvt *pvt, int range,
1360 u64 sys_addr, int *nid, int *chan_sel)
1362 int cs_found = -EINVAL, high_range = 0;
1363 u64 chan_addr, dct_sel_base_off;
1365 u32 hole_valid, tmp, dct_sel_base;
1369 u8 node_id = dram_dst_node(pvt, range);
1370 u32 intlv_en = dram_intlv_en(pvt, range);
1371 u32 intlv_sel = dram_intlv_sel(pvt, range);
1372 u64 dram_base = get_dram_base(pvt, range);
1374 debugf1("(range %d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
1375 range, dram_base, sys_addr, get_dram_limit(pvt, range));
1378 * This assumes that one node's DHAR is the same as all the other
1381 hole_off = f10_dhar_offset(pvt);
1382 hole_valid = (pvt->dhar & DHAR_VALID);
1383 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1385 debugf1(" HoleOffset=0x%016llx HoleValid=%d IntlvSel=0x%x\n",
1386 hole_off, hole_valid, intlv_sel);
1389 (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1392 dct_sel_base = dct_sel_baseaddr(pvt);
1395 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1396 * select between DCT0 and DCT1.
1398 if (dct_high_range_enabled(pvt) &&
1399 !dct_ganging_enabled(pvt) &&
1400 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1403 channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
1405 chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
1406 dct_sel_base_off, hole_valid,
1407 hole_off, dram_base);
1409 intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
1411 /* remove Node ID (in case of memory interleaving) */
1412 tmp = chan_addr & 0xFC0;
1414 chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
1416 /* remove channel interleave and hash */
1417 if (dct_interleave_enabled(pvt) &&
1418 !dct_high_range_enabled(pvt) &&
1419 !dct_ganging_enabled(pvt)) {
1420 if (dct_sel_interleave_addr(pvt) != 1)
1421 chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
1423 tmp = chan_addr & 0xFC0;
1424 chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
1429 debugf1(" (ChannelAddrLong=0x%llx)\n", chan_addr);
1431 cs_found = f10_lookup_addr_in_dct(chan_addr, node_id, channel);
1433 if (cs_found >= 0) {
1435 *chan_sel = channel;
1440 static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1441 int *node, int *chan_sel)
1443 int range, cs_found = -EINVAL;
1445 for (range = 0; range < DRAM_RANGES; range++) {
1447 if (!dram_rw(pvt, range))
1450 if ((get_dram_base(pvt, range) <= sys_addr) &&
1451 (get_dram_limit(pvt, range) >= sys_addr)) {
1453 cs_found = f10_match_to_this_node(pvt, range,
1464 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1465 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1467 * The @sys_addr is usually an error address received from the hardware
1470 static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1471 struct err_regs *err_info,
1474 struct amd64_pvt *pvt = mci->pvt_info;
1476 int nid, csrow, chan = 0;
1479 csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1482 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1486 error_address_to_page_and_offset(sys_addr, &page, &offset);
1488 syndrome = extract_syndrome(err_info);
1491 * We need the syndromes for channel detection only when we're
1492 * ganged. Otherwise @chan should already contain the channel at
1495 if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
1496 chan = get_channel_from_ecc_syndrome(mci, syndrome);
1499 edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
1503 * Channel unknown, report all channels on this CSROW as failed.
1505 for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
1506 edac_mc_handle_ce(mci, page, offset, syndrome,
1507 csrow, chan, EDAC_MOD_STR);
1511 * debug routine to display the memory sizes of all logical DIMMs and their
1514 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
1516 int dimm, size0, size1, factor = 0;
1520 if (boot_cpu_data.x86 == 0xf) {
1521 if (pvt->dclr0 & F10_WIDTH_128)
1524 /* K8 families < revF not supported yet */
1525 if (pvt->ext_model < K8_REV_F)
1531 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
1532 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
1533 : pvt->csels[0].csbases;
1535 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
1537 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1539 /* Dump memory sizes for DIMM and its CSROWs */
1540 for (dimm = 0; dimm < 4; dimm++) {
1543 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
1544 size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
1547 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
1548 size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
1550 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1551 dimm * 2, size0 << factor,
1552 dimm * 2 + 1, size1 << factor);
1556 static struct amd64_family_type amd64_family_types[] = {
1559 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1560 .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1562 .early_channel_count = k8_early_channel_count,
1563 .get_error_address = k8_get_error_address,
1564 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1565 .dbam_to_cs = k8_dbam_to_chip_select,
1566 .read_dct_pci_cfg = k8_read_dct_pci_cfg,
1571 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1572 .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1574 .early_channel_count = f10_early_channel_count,
1575 .get_error_address = f10_get_error_address,
1576 .read_dram_ctl_register = f10_read_dram_ctl_register,
1577 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1578 .dbam_to_cs = f10_dbam_to_chip_select,
1579 .read_dct_pci_cfg = f10_read_dct_pci_cfg,
1585 .read_dct_pci_cfg = f15_read_dct_pci_cfg,
1590 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1591 unsigned int device,
1592 struct pci_dev *related)
1594 struct pci_dev *dev = NULL;
1596 dev = pci_get_device(vendor, device, dev);
1598 if ((dev->bus->number == related->bus->number) &&
1599 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1601 dev = pci_get_device(vendor, device, dev);
1608 * These are tables of eigenvectors (one per line) which can be used for the
1609 * construction of the syndrome tables. The modified syndrome search algorithm
1610 * uses those to find the symbol in error and thus the DIMM.
1612 * Algorithm courtesy of Ross LaFetra from AMD.
1614 static u16 x4_vectors[] = {
1615 0x2f57, 0x1afe, 0x66cc, 0xdd88,
1616 0x11eb, 0x3396, 0x7f4c, 0xeac8,
1617 0x0001, 0x0002, 0x0004, 0x0008,
1618 0x1013, 0x3032, 0x4044, 0x8088,
1619 0x106b, 0x30d6, 0x70fc, 0xe0a8,
1620 0x4857, 0xc4fe, 0x13cc, 0x3288,
1621 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1622 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1623 0x15c1, 0x2a42, 0x89ac, 0x4758,
1624 0x2b03, 0x1602, 0x4f0c, 0xca08,
1625 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1626 0x8ba7, 0x465e, 0x244c, 0x1cc8,
1627 0x2b87, 0x164e, 0x642c, 0xdc18,
1628 0x40b9, 0x80de, 0x1094, 0x20e8,
1629 0x27db, 0x1eb6, 0x9dac, 0x7b58,
1630 0x11c1, 0x2242, 0x84ac, 0x4c58,
1631 0x1be5, 0x2d7a, 0x5e34, 0xa718,
1632 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1633 0x4c97, 0xc87e, 0x11fc, 0x33a8,
1634 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1635 0x16b3, 0x3d62, 0x4f34, 0x8518,
1636 0x1e2f, 0x391a, 0x5cac, 0xf858,
1637 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1638 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1639 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1640 0x4397, 0xc27e, 0x17fc, 0x3ea8,
1641 0x1617, 0x3d3e, 0x6464, 0xb8b8,
1642 0x23ff, 0x12aa, 0xab6c, 0x56d8,
1643 0x2dfb, 0x1ba6, 0x913c, 0x7328,
1644 0x185d, 0x2ca6, 0x7914, 0x9e28,
1645 0x171b, 0x3e36, 0x7d7c, 0xebe8,
1646 0x4199, 0x82ee, 0x19f4, 0x2e58,
1647 0x4807, 0xc40e, 0x130c, 0x3208,
1648 0x1905, 0x2e0a, 0x5804, 0xac08,
1649 0x213f, 0x132a, 0xadfc, 0x5ba8,
1650 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1653 static u16 x8_vectors[] = {
1654 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1655 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1656 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1657 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1658 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1659 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1660 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1661 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1662 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1663 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1664 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1665 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1666 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1667 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1668 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1669 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1670 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1671 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1672 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
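/*
 * A rough sketch of how these tables are consumed: decode_syndrome()
 * below treats each run of v_dim vectors as one error symbol. It
 * attempts to cancel the 16-bit syndrome by XORing in that symbol's
 * eigenvectors; the symbol whose vectors reduce the syndrome to zero
 * is reported, and map_err_sym_to_channel() then converts it into a
 * channel number.
 */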
1675 static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
1678 unsigned int i, err_sym;
1680 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1682 int v_idx = err_sym * v_dim;
1683 int v_end = (err_sym + 1) * v_dim;
1685 /* walk over all 16 bits of the syndrome */
1686 for (i = 1; i < (1U << 16); i <<= 1) {
1688 /* if bit is set in that eigenvector... */
1689 if (v_idx < v_end && vectors[v_idx] & i) {
1690 u16 ev_comp = vectors[v_idx++];
1692 /* ... and bit set in the modified syndrome, */
1702 /* can't get to zero, move to next symbol */
1707 debugf0("syndrome(%x) not found\n", syndrome);
1711 static int map_err_sym_to_channel(int err_sym, int sym_size)
1724 return err_sym >> 4;
1730 /* imaginary bits not in a DIMM */
1732 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
1744 return err_sym >> 3;
1750 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1752 struct amd64_pvt *pvt = mci->pvt_info;
1755 if (pvt->syn_type == 8)
1756 err_sym = decode_syndrome(syndrome, x8_vectors,
1757 ARRAY_SIZE(x8_vectors),
1759 else if (pvt->syn_type == 4)
1760 err_sym = decode_syndrome(syndrome, x4_vectors,
1761 ARRAY_SIZE(x4_vectors),
1764 amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);
1768 return map_err_sym_to_channel(err_sym, pvt->syn_type);
1772 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
1773 * ADDRESS and process.
1775 static void amd64_handle_ce(struct mem_ctl_info *mci,
1776 struct err_regs *info)
1778 struct amd64_pvt *pvt = mci->pvt_info;
1781 /* Ensure that the Error Address is VALID */
1782 if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
1783 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1784 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1788 sys_addr = pvt->ops->get_error_address(mci, info);
1790 amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
1792 pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
1795 /* Handle any Un-correctable Errors (UEs) */
1796 static void amd64_handle_ue(struct mem_ctl_info *mci,
1797 struct err_regs *info)
1799 struct amd64_pvt *pvt = mci->pvt_info;
1800 struct mem_ctl_info *log_mci, *src_mci = NULL;
1807 if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
1808 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1809 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
1813 sys_addr = pvt->ops->get_error_address(mci, info);
1816 * Find out which node the error address belongs to. This may be
1817 * different from the node that detected the error.
1819 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1821 amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
1822 (unsigned long)sys_addr);
1823 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
1829 csrow = sys_addr_to_csrow(log_mci, sys_addr);
1831 amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
1832 (unsigned long)sys_addr);
1833 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
1835 error_address_to_page_and_offset(sys_addr, &page, &offset);
1836 edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
1840 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
1841 struct err_regs *info)
1843 u16 ec = EC(info->nbsl);
1844 u8 xec = XEC(info->nbsl, 0x1f);
1845 int ecc_type = (info->nbsh >> 13) & 0x3;
1847 /* Bail out early if this was an 'observed' error */
1848 if (PP(ec) == K8_NBSL_PP_OBS)
1851 /* Do only ECC errors */
1852 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
1856 amd64_handle_ce(mci, info);
1857 else if (ecc_type == 1)
1858 amd64_handle_ue(mci, info);
1861 void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
1863 struct mem_ctl_info *mci = mcis[node_id];
1864 struct err_regs regs;
1866 regs.nbsl = (u32) m->status;
1867 regs.nbsh = (u32)(m->status >> 32);
1868 regs.nbeal = (u32) m->addr;
1869 regs.nbeah = (u32)(m->addr >> 32);
1872 __amd64_decode_bus_error(mci, &regs);
1875 * Check the UE bit of the NB status high register; if set, generate some
1876 * logs. If NOT a GART error, then process the event as a NO-INFO event.
1877 * If it was a GART error, skip that process.
1879 * FIXME: this should go somewhere else, if at all.
1881 if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
1882 edac_mc_handle_ue_no_info(mci, "UE bit is set");
1887 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
1888 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
1890 static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
1892 /* Reserve the ADDRESS MAP Device */
1893 pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
1895 amd64_err("error address map device not found: "
1896 "vendor %x device 0x%x (broken BIOS?)\n",
1897 PCI_VENDOR_ID_AMD, f1_id);
1901 /* Reserve the MISC Device */
1902 pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
1904 pci_dev_put(pvt->F1);
1907 amd64_err("error F3 device not found: "
1908 "vendor %x device 0x%x (broken BIOS?)\n",
1909 PCI_VENDOR_ID_AMD, f3_id);
1913 debugf1("F1: %s\n", pci_name(pvt->F1));
1914 debugf1("F2: %s\n", pci_name(pvt->F2));
1915 debugf1("F3: %s\n", pci_name(pvt->F3));
1920 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
1922 pci_dev_put(pvt->F1);
1923 pci_dev_put(pvt->F3);
1927 * Retrieve the hardware registers of the memory controller (this includes the
1928 * 'Address Map' and 'Misc' device regs)
1930 static void read_mc_regs(struct amd64_pvt *pvt)
1937 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
1938 * those are Read-As-Zero
1940 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
1941 debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);
1943 /* check first whether TOP_MEM2 is enabled */
1944 rdmsrl(MSR_K8_SYSCFG, msr_val);
1945 if (msr_val & (1U << 21)) {
1946 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
1947 debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
1949 debugf0(" TOP_MEM2 disabled.\n");
1951 amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap);
1953 if (pvt->ops->read_dram_ctl_register)
1954 pvt->ops->read_dram_ctl_register(pvt);
1956 for (range = 0; range < DRAM_RANGES; range++) {
1959 /* read settings for this DRAM range */
1960 read_dram_base_limit_regs(pvt, range);
1962 rw = dram_rw(pvt, range);
1966 debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
1968 get_dram_base(pvt, range),
1969 get_dram_limit(pvt, range));
1971 debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
1972 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
1973 (rw & 0x1) ? "R" : "-",
1974 (rw & 0x2) ? "W" : "-",
1975 dram_intlv_sel(pvt, range),
1976 dram_dst_node(pvt, range));
1979 read_dct_base_mask(pvt);
1981 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
1982 amd64_read_dbam_reg(pvt);
1984 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
1986 amd64_read_dct_pci_cfg(pvt, F10_DCLR_0, &pvt->dclr0);
1987 amd64_read_dct_pci_cfg(pvt, F10_DCHR_0, &pvt->dchr0);
1989 if (!dct_ganging_enabled(pvt)) {
1990 amd64_read_dct_pci_cfg(pvt, F10_DCLR_1, &pvt->dclr1);
1991 amd64_read_dct_pci_cfg(pvt, F10_DCHR_1, &pvt->dchr1);
1994 if (boot_cpu_data.x86 >= 0x10)
1995 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
1997 if (boot_cpu_data.x86 == 0x10 &&
1998 boot_cpu_data.x86_model > 7 &&
1999 /* F3x180[EccSymbolSize]=1 => x8 symbols */
2005 dump_misc_regs(pvt);
2009 * NOTE: CPU Revision Dependent code
2012 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2013 * k8 private pointer to -->
2014 * DRAM Bank Address mapping register
2016 * DCL register where dual_channel_active is
2018 * The DBAM register consists of 4 sets of 4 bits, each with these definitions:
2021 * 0-3 CSROWs 0 and 1
2022 * 4-7 CSROWs 2 and 3
2023 * 8-11 CSROWs 4 and 5
2024 * 12-15 CSROWs 6 and 7
2026 * Values range from: 0 to 15
2027 * The meaning of the values depends on CPU revision and dual-channel state;
2028 * see the relevant BKDG for more info.
2030 * The memory controller provides for a total of only 8 CSROWs in its current
2031 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2032 * single channel or two (2) DIMMs in dual channel mode.
2034 * The following code logic collapses the various tables for CSROW based on CPU
2038 * The number of PAGE_SIZE pages on the specified CSROW number it
2042 static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
2044 u32 cs_mode, nr_pages;
2047 * The math on this doesn't look right on the surface because x/2*4 can
2048 * be simplified to x*2 but this expression makes use of the fact that
2049 * it is integer math, where 1/2=0. This intermediate value becomes the
2050 * number of bits to shift the DBAM register to extract the proper CSROW
2053 cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
2055 nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
2058 * If dual channel then double the memory size of single channel.
2059 * Channel count is 1 or 2
2061 nr_pages <<= (pvt->channel_count - 1);
2063 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
2064 debugf0(" nr_pages= %u channel-count = %d\n",
2065 nr_pages, pvt->channel_count);
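/*
 * Example of the shift math above (illustrative): csrow 5 gives
 * (5 / 2) * 4 = 8, i.e. cs_mode is taken from DBAM bits 11-8, the
 * same nibble as its pair csrow 4, whereas the "simplified" 5 * 2 = 10
 * would land in the middle of the nibble.
 */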
2071 * Initialize the array of csrow attribute instances, based on the values
2072 * from pci config hardware registers.
2074 static int init_csrows(struct mem_ctl_info *mci)
2076 struct csrow_info *csrow;
2077 struct amd64_pvt *pvt = mci->pvt_info;
2078 u64 input_addr_min, input_addr_max, sys_addr, base, mask;
2082 amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val);
2085 pvt->ctl_error_info.nbcfg = val;
2087 debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2088 pvt->mc_node_id, val,
2089 !!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE));
2091 for_each_chip_select(i, 0, pvt) {
2092 csrow = &mci->csrows[i];
2094 if (!csrow_enabled(i, 0, pvt)) {
2095 debugf1("----CSROW %d EMPTY for node %d\n", i,
2100 debugf1("----CSROW %d VALID for MC node %d\n",
2101 i, pvt->mc_node_id);
2104 csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
2105 find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
2106 sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
2107 csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
2108 sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
2109 csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
2111 get_cs_base_and_mask(pvt, i, 0, &base, &mask);
2112 csrow->page_mask = ~mask;
2113 /* 8 bytes of resolution */
2115 csrow->mtype = amd64_determine_memory_type(pvt, i);
2117 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2118 debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
2119 (unsigned long)input_addr_min,
2120 (unsigned long)input_addr_max);
2121 debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
2122 (unsigned long)sys_addr, csrow->page_mask);
2123 debugf1(" nr_pages: %u first_page: 0x%lx "
2124 "last_page: 0x%lx\n",
2125 (unsigned)csrow->nr_pages,
2126 csrow->first_page, csrow->last_page);
2129 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2131 if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
2133 (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
2134 EDAC_S4ECD4ED : EDAC_SECDED;
2136 csrow->edac_mode = EDAC_NONE;
2142 /* get all cores on this DCT */
2143 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
2147 for_each_online_cpu(cpu)
2148 if (amd_get_nb_id(cpu) == nid)
2149 cpumask_set_cpu(cpu, mask);
2152 /* check MCG_CTL on all the cpus on this node */
2153 static bool amd64_nb_mce_bank_enabled_on_node(int nid)
2159 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2160 amd64_warn("%s: Error allocating mask\n", __func__);
2164 get_cpus_on_this_dct_cpumask(mask, nid);
2166 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2168 for_each_cpu(cpu, mask) {
2169 struct msr *reg = per_cpu_ptr(msrs, cpu);
2170 nbe = reg->l & K8_MSR_MCGCTL_NBE;
2172 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2174 (nbe ? "enabled" : "disabled"));
2182 free_cpumask_var(mask);
2186 static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
2188 cpumask_var_t cmask;
2191 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2192 amd64_warn("%s: error allocating mask\n", __func__);
2196 get_cpus_on_this_dct_cpumask(cmask, nid);
2198 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2200 for_each_cpu(cpu, cmask) {
2202 struct msr *reg = per_cpu_ptr(msrs, cpu);
2205 if (reg->l & K8_MSR_MCGCTL_NBE)
2206 s->flags.nb_mce_enable = 1;
2208 reg->l |= K8_MSR_MCGCTL_NBE;
2211 * Turn off NB MCE reporting only when it was off before
2213 if (!s->flags.nb_mce_enable)
2214 reg->l &= ~K8_MSR_MCGCTL_NBE;
2217 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2219 free_cpumask_var(cmask);
2224 static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2228 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2230 if (toggle_ecc_err_reporting(s, nid, ON)) {
2231 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2235 amd64_read_pci_cfg(F3, K8_NBCTL, &value);
2237 /* turn on UECCEn and CECCEn bits */
2238 s->old_nbctl = value & mask;
2239 s->nbctl_valid = true;
2242 amd64_write_pci_cfg(F3, K8_NBCTL, value);
2244 amd64_read_pci_cfg(F3, K8_NBCFG, &value);
2246 debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2248 !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
2250 if (!(value & K8_NBCFG_ECC_ENABLE)) {
2251 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2253 s->flags.nb_ecc_prev = 0;
2255 /* Attempt to turn on DRAM ECC Enable */
2256 value |= K8_NBCFG_ECC_ENABLE;
2257 amd64_write_pci_cfg(F3, K8_NBCFG, value);
2259 amd64_read_pci_cfg(F3, K8_NBCFG, &value);
2261 if (!(value & K8_NBCFG_ECC_ENABLE)) {
2262 amd64_warn("Hardware rejected DRAM ECC enable, "
2263 "check memory DIMM configuration.\n");
2266 amd64_info("Hardware accepted DRAM ECC Enable\n");
2269 s->flags.nb_ecc_prev = 1;
2272 debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2274 !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
					struct pci_dev *F3)
{
	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, K8_NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, K8_NBCTL, value);

	/* Restore the previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, K8_NBCFG, &value);
		value &= ~K8_NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, K8_NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}
/*
 * EDAC requires that the BIOS have ECC enabled before taking over the
 * processing of ECC errors. A command line option allows force-enabling
 * hardware ECC later, in enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";
static bool ecc_enabled(struct pci_dev *F3, u8 nid)
{
	u32 value;
	u8 ecc_en = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(F3, K8_NBCFG, &value);

	ecc_en = !!(value & K8_NBCFG_ECC_ENABLE);
	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
	if (!nb_mce_en)
		amd64_notice("NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, nid);

	if (!ecc_en || !nb_mce_en) {
		amd64_notice("%s", ecc_msg);
		return false;
	}
	return true;
}
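/*
 * Decision table for the check above (illustrative summary; absent the
 * ecc_enable_override parameter, both conditions must hold):
 *
 *	DramEccEn	NbE on all cores	ecc_enabled()
 *	    0			x		    false
 *	    x			0		    false
 *	    1			1		    true
 */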
static struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
						 ARRAY_SIZE(amd64_inj_attrs) +
						 1];

static struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };

static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
	unsigned int i = 0, j = 0;

	for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
		sysfs_attrs[i] = amd64_dbg_attrs[i];

	/* The error-injection attributes exist on family 0x10 and later only. */
	if (boot_cpu_data.x86 >= 0x10)
		for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
			sysfs_attrs[i] = amd64_inj_attrs[j];

	sysfs_attrs[i] = terminator;

	mci->mc_driver_sysfs_attributes = sysfs_attrs;
}
static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & K8_NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & K8_NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= amd64_determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= pvt->ctl_name;
	mci->dev_name		= pci_name(pvt->F2);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}
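/*
 * With the scrubber hooks above wired up, EDAC core exposes the rate in
 * sysfs. A usage sketch (the file is provided by EDAC core; the bandwidth
 * value in bytes/sec is chosen for illustration):
 *
 *	echo 1600000000 > /sys/devices/system/edac/mc/mc0/sdram_scrub_rate
 *	cat /sys/devices/system/edac/mc/mc0/sdram_scrub_rate
 */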
/*
 * Returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
{
	u8 fam = boot_cpu_data.x86;
	struct amd64_family_type *fam_type = NULL;

	switch (fam) {
	case 0xf:
		fam_type	   = &amd64_family_types[K8_CPUS];
		pvt->ops	   = &amd64_family_types[K8_CPUS].ops;
		pvt->ctl_name	   = fam_type->ctl_name;
		pvt->min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
		break;

	case 0x10:
		fam_type	   = &amd64_family_types[F10_CPUS];
		pvt->ops	   = &amd64_family_types[F10_CPUS].ops;
		pvt->ctl_name	   = fam_type->ctl_name;
		pvt->min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	pvt->ext_model = boot_cpu_data.x86_model >> 4;

	amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
		   (fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);
	return fam_type;
}
static int amd64_init_one_instance(struct pci_dev *F2)
{
	struct amd64_pvt *pvt = NULL;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	int err = 0, ret;
	u8 nid = get_node_id(F2);

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id = nid;
	pvt->F2 = F2;

	ret = -EINVAL;
	fam_type = amd64_per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	ret = -ENODEV;
	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
	if (err)
		goto err_free;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;

	ret = -ENOMEM;
	mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
	if (!mci)
		goto err_siblings;

	mci->pvt_info = pvt;
	mci->dev = &pvt->F2->dev;

	setup_mci_misc_attrs(mci);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	set_mc_sysfs_attrs(mci);

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		debugf1("failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(amd64_decode_bus_error);

	mcis[nid] = mci;

	atomic_inc(&drv_instances);
	return 0;

err_add_mc:
	edac_mc_free(mci);
err_siblings:
	free_mc_sibling_devs(pvt);
err_free:
	kfree(pvt);
err_ret:
	return ret;
}
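/*
 * Note on the unwind above: the error labels run in reverse order of
 * acquisition (mc_add -> mci alloc -> sibling devs -> pvt), so a failure
 * at any stage releases exactly what was set up before it.
 */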
static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
					      const struct pci_device_id *mc_type)
{
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret = 0;

	ret = pci_enable_device(pdev);
	if (ret < 0) {
		debugf0("ret=%d\n", ret);
		return -EIO;
	}

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = -ENODEV;

		if (!ecc_enable_override)
			goto err_enable;

		amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = amd64_init_one_instance(pdev);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);
		restore_ecc_error_reporting(s, nid, F3);
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;
err_out:
	return ret;
}
static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;
	mcis[nid] = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}
/*
 * This table is part of the interface for loading drivers for PCI devices.
 * The PCI core identifies what devices are on a system during boot, and then
 * queries this table to see whether this driver handles a given device it
 * found.
 */
static const struct pci_device_id amd64_pci_table[] __devinitdata = {
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
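/*
 * For illustration: MODULE_DEVICE_TABLE() emits modalias entries from the
 * table above, so udev can autoload this module when a matching PCI device
 * (AMD vendor ID 0x1022, one of the two device IDs listed) shows up;
 * "modinfo amd64_edac" lists the generated aliases.
 */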
static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= amd64_probe_one_instance,
	.remove		= __devexit_p(amd64_remove_one_instance),
	.id_table	= amd64_pci_table,
};
static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (amd64_ctl_pci)
		return;

	mci = mcis[0];
	if (!mci)
		return;

	pvt = mci->pvt_info;
	amd64_ctl_pci =
		edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
	if (!amd64_ctl_pci) {
		pr_warning("%s(): Unable to create PCI control\n", __func__);
		pr_warning("%s(): PCI error report via EDAC not set up\n",
			   __func__);
	}
}
static int __init amd64_edac_init(void)
{
	int err = -ENODEV;

	edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");

	opstate_init();

	if (amd_cache_northbridges() < 0)
		goto err_ret;

	err = -ENOMEM;
	mcis	  = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!(mcis && ecc_stngs))
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		goto err_pci;

	err = -ENODEV;
	if (!atomic_read(&drv_instances))
		goto err_no_instances;

	setup_pci_device();
	return 0;

err_no_instances:
	pci_unregister_driver(&amd64_pci_driver);
err_pci:
	msrs_free(msrs);
	msrs = NULL;
err_free:
	kfree(mcis);
	mcis = NULL;
	kfree(ecc_stngs);
	ecc_stngs = NULL;
err_ret:
	return err;
}
static void __exit amd64_edac_exit(void)
{
	if (amd64_ctl_pci)
		edac_pci_release_generic_ctl(amd64_ctl_pci);

	pci_unregister_driver(&amd64_pci_driver);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	kfree(mcis);
	mcis = NULL;

	msrs_free(msrs);
	msrs = NULL;
}

module_init(amd64_edac_init);
module_exit(amd64_edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");