1 #include "amd64_edac.h"
2 #include <asm/amd_nb.h>
4 static struct edac_pci_ctl_info *amd64_ctl_pci;
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
10 * Set by command line parameter. If BIOS has enabled the ECC, this override is
11 * cleared to prevent re-enabling the hardware by this driver.
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
16 static struct msr __percpu *msrs;
19 * count successfully initialized driver instances for setup_pci_device()
21 static atomic_t drv_instances = ATOMIC_INIT(0);
23 /* Per-node driver instances */
24 static struct mem_ctl_info **mcis;
25 static struct ecc_settings **ecc_stngs;
28 * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and later.
31 static int ddr2_dbam_revCG[] = {
41 static int ddr2_dbam_revD[] = {
53 static int ddr2_dbam[] = { [0] = 128,
62 static int ddr3_dbam[] = { [0] = -1,
73 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
74 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching or higher' value.
77 * FIXME: Produce a better mapping/linearisation.
82 u32 scrubval; /* bit pattern for scrub rate */
83 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
85 { 0x01, 1600000000UL},
107 { 0x00, 0UL}, /* scrubbing off */
110 static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
111 u32 *val, const char *func)
115 err = pci_read_config_dword(pdev, offset, val);
117 amd64_warn("%s: error reading F%dx%03x.\n",
118 func, PCI_FUNC(pdev->devfn), offset);
123 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
124 u32 val, const char *func)
128 err = pci_write_config_dword(pdev, offset, val);
130 amd64_warn("%s: error writing to F%dx%03x.\n",
131 func, PCI_FUNC(pdev->devfn), offset);
138 * Depending on the family, F2 DCT reads need special handling:
140 * K8: has a single DCT only
142 * F10h: each DCT has its own set of regs
146 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
149 static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
155 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
158 static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
161 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
164 static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
170 if (addr >= 0x140 && addr <= 0x1a0) {
175 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
178 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
180 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
184 * Memory scrubber control interface. For K8, memory scrubbing is handled by
185 * hardware and can involve L2 cache, dcache as well as the main memory. With
186 * F10, this is extended to L3 cache scrubbing on CPU models sporting that functionality.
189 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
190 * (DRAM) to cache lines. This is nasty, so we will use bandwidth in
191 * bytes/sec for the setting.
193 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
194 * other archs, we might not have access to the caches directly.
198 * scan the scrub rate mapping table for a close or matching bandwidth value to
199 * issue. If the requested rate is too big, use the last maximum value found.
201 static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
207 * map the configured rate (new_bw) to a value specific to the AMD64
208 * memory controller and apply to register. Search for the first
209 * bandwidth entry that is greater or equal than the setting requested
210 * and program that. If at last entry, turn off DRAM scrubbing.
212 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
214 * skip scrub rates which aren't recommended
215 * (see F10 BKDG, F3x58)
217 if (scrubrates[i].scrubval < min_rate)
220 if (scrubrates[i].bandwidth <= new_bw)
224 * if no suitable bandwidth found, turn off DRAM scrubbing
225 * entirely by falling back to the last element in the scrubrates[] array.
230 scrubval = scrubrates[i].scrubval;
232 pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
235 return scrubrates[i].bandwidth;
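/*
 * Worked example (illustrative, using only the table entries shown above):
 * a request of new_bw = 1600000000 bytes/sec stops at the first entry with
 * bandwidth <= new_bw, i.e. { 0x01, 1600000000UL }, so scrubval 0x01 gets
 * programmed into F3x58.  A request smaller than every listed bandwidth
 * falls through to the terminating { 0x00, 0UL } entry, which switches
 * scrubbing off, as the comment above describes.
 */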
240 static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
242 struct amd64_pvt *pvt = mci->pvt_info;
244 return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
247 static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
249 struct amd64_pvt *pvt = mci->pvt_info;
251 int i, retval = -EINVAL;
253 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
255 scrubval = scrubval & 0x001F;
257 amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval);
259 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
260 if (scrubrates[i].scrubval == scrubval) {
261 retval = scrubrates[i].bandwidth;
269 * returns true if the SysAddr given by sys_addr matches the
270 * DRAM base/limit associated with node_id
272 static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, int nid)
276 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
277 * all ones if the most significant implemented address bit is 1.
278 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
279 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
280 * Application Programming.
282 addr = sys_addr & 0x000000ffffffffffull;
284 return ((addr >= get_dram_base(pvt, nid)) &&
285 (addr <= get_dram_limit(pvt, nid)));
289 * Attempt to map a SysAddr to a node. On success, return a pointer to the
290 * mem_ctl_info structure for the node that the SysAddr maps to.
292 * On failure, return NULL.
294 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
297 struct amd64_pvt *pvt;
302 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
303 * 3.4.4.2) registers to map the SysAddr to a node ID.
308 * The value of this field should be the same for all DRAM Base
309 * registers. Therefore we arbitrarily choose to read it from the
310 * register for node 0.
312 intlv_en = dram_intlv_en(pvt, 0);
315 for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
316 if (amd64_base_limit_match(pvt, sys_addr, node_id))
322 if (unlikely((intlv_en != 0x01) &&
323 (intlv_en != 0x03) &&
324 (intlv_en != 0x07))) {
325 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
329 bits = (((u32) sys_addr) >> 12) & intlv_en;
331 for (node_id = 0; ; ) {
332 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
333 break; /* intlv_sel field matches */
335 if (++node_id >= DRAM_RANGES)
339 /* sanity test for sys_addr */
340 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
341 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
342 "range for node %d with node interleaving enabled.\n",
343 __func__, sys_addr, node_id);
348 return edac_mc_find(node_id);
351 debugf2("sys_addr 0x%lx doesn't match any node\n",
352 (unsigned long)sys_addr);
358 * compute the CS base address of the @csrow on the DRAM controller @dct.
359 * For details see F2x[5C:40] in the processor's BKDG
361 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
362 u64 *base, u64 *mask)
364 u64 csbase, csmask, base_bits, mask_bits;
367 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
368 csbase = pvt->csels[dct].csbases[csrow];
369 csmask = pvt->csels[dct].csmasks[csrow];
370 base_bits = GENMASK(21, 31) | GENMASK(9, 15);
371 mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
374 csbase = pvt->csels[dct].csbases[csrow];
375 csmask = pvt->csels[dct].csmasks[csrow >> 1];
378 if (boot_cpu_data.x86 == 0x15)
379 base_bits = mask_bits = GENMASK(19, 30) | GENMASK(5, 13);
381 base_bits = mask_bits = GENMASK(19, 28) | GENMASK(5, 13);
384 *base = (csbase & base_bits) << addr_shift;
387 /* poke holes for the csmask */
388 *mask &= ~(mask_bits << addr_shift);
390 *mask |= (csmask & mask_bits) << addr_shift;
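/*
 * Illustrative note: with the @base and @mask computed above, an address A
 * belongs to this chip select iff
 *
 *	(A & mask) == (base & mask)
 *
 * which is exactly the comparison input_addr_to_csrow() and
 * f1x_lookup_addr_in_dct() perform.  The holes poked into @mask are the
 * address bits the row decoder does not compare.
 */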
393 #define for_each_chip_select(i, dct, pvt) \
394 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
396 #define chip_select_base(i, dct, pvt) \
397 pvt->csels[dct].csbases[i]
399 #define for_each_chip_select_mask(i, dct, pvt) \
400 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
403 * @input_addr is an InputAddr associated with the node given by mci. Return the
404 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
406 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
408 struct amd64_pvt *pvt;
414 for_each_chip_select(csrow, 0, pvt) {
415 if (!csrow_enabled(csrow, 0, pvt))
418 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
422 if ((input_addr & mask) == (base & mask)) {
423 debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
424 (unsigned long)input_addr, csrow,
430 debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
431 (unsigned long)input_addr, pvt->mc_node_id);
437 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
438 * for the node represented by mci. Info is passed back in *hole_base,
439 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
440 * info is invalid. Info may be invalid for either of the following reasons:
442 * - The revision of the node is not E or greater. In this case, the DRAM Hole
443 * Address Register does not exist.
445 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
446 * indicating that its contents are not valid.
448 * The values passed back in *hole_base, *hole_offset, and *hole_size are
449 * complete 32-bit values despite the fact that the bitfields in the DHAR
450 * only represent bits 31-24 of the base and offset values.
452 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
453 u64 *hole_offset, u64 *hole_size)
455 struct amd64_pvt *pvt = mci->pvt_info;
458 /* only revE and later have the DRAM Hole Address Register */
459 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
460 debugf1(" revision %d for node %d does not support DHAR\n",
461 pvt->ext_model, pvt->mc_node_id);
465 /* valid for Fam10h and above */
466 if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
467 debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
471 if (!dhar_valid(pvt)) {
472 debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
477 /* This node has Memory Hoisting */
479 /* +------------------+--------------------+--------------------+-----
480 * | memory | DRAM hole | relocated |
481 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
483 * | | | [0x100000000, |
484 * | | | (0x100000000+ |
485 * | | | (0xffffffff-x))] |
486 * +------------------+--------------------+--------------------+-----
488 * Above is a diagram of physical memory showing the DRAM hole and the
489 * relocated addresses from the DRAM hole. As shown, the DRAM hole
490 * starts at address x (the base address) and extends through address
491 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
492 * addresses in the hole so that they start at 0x100000000.
495 base = dhar_base(pvt);
498 *hole_size = (0x1ull << 32) - base;
500 if (boot_cpu_data.x86 > 0xf)
501 *hole_offset = f10_dhar_offset(pvt);
503 *hole_offset = k8_dhar_offset(pvt);
505 debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
506 pvt->mc_node_id, (unsigned long)*hole_base,
507 (unsigned long)*hole_offset, (unsigned long)*hole_size);
511 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
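/*
 * Worked example with hypothetical register values: if dhar_base() returns
 * 0xc0000000, the code above reports
 *
 *	hole_base = 0xc0000000
 *	hole_size = (1ull << 32) - 0xc0000000 = 0x40000000	(1G)
 *
 * i.e. the last gigabyte below 4G is the DRAM hole, and per the diagram in
 * the function, an equal amount of DRAM is accessed starting at
 * 0x100000000.
 */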
514 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
515 * assumed that sys_addr maps to the node given by mci.
517 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
518 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
519 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
520 * then it is also involved in translating a SysAddr to a DramAddr. Sections
521 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
522 * These parts of the documentation are unclear. I interpret them as follows:
524 * When node n receives a SysAddr, it processes the SysAddr as follows:
526 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
527 * Limit registers for node n. If the SysAddr is not within the range
528 * specified by the base and limit values, then node n ignores the SysAddr
529 * (since it does not map to node n). Otherwise continue to step 2 below.
531 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
532 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
533 * the range of relocated addresses (starting at 0x100000000) from the DRAM
534 * hole. If not, skip to step 3 below. Else get the value of the
535 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
536 * offset defined by this value from the SysAddr.
538 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
539 * Base register for node n. To obtain the DramAddr, subtract the base
540 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
542 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
544 struct amd64_pvt *pvt = mci->pvt_info;
545 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
548 dram_base = get_dram_base(pvt, pvt->mc_node_id);
550 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
553 if ((sys_addr >= (1ull << 32)) &&
554 (sys_addr < ((1ull << 32) + hole_size))) {
555 /* use DHAR to translate SysAddr to DramAddr */
556 dram_addr = sys_addr - hole_offset;
558 debugf2("using DHAR to translate SysAddr 0x%lx to "
560 (unsigned long)sys_addr,
561 (unsigned long)dram_addr);
568 * Translate the SysAddr to a DramAddr as shown near the start of
569 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
570 * only deals with 40-bit values. Therefore we discard bits 63-40 of
571 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
572 * discard are all 1s. Otherwise the bits we discard are all 0s. See
573 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
574 * Programmer's Manual Volume 1 Application Programming.
576 dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
578 debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
579 "DramAddr 0x%lx\n", (unsigned long)sys_addr,
580 (unsigned long)dram_addr);
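/*
 * Worked example (hypothetical): with dram_base == 0x100000000 for this
 * node and no hoisting in effect, a SysAddr of 0x123456789 yields
 *
 *	dram_addr = (0x123456789 & GENMASK(0, 39)) - 0x100000000
 *		  = 0x23456789
 *
 * whereas a SysAddr inside [4G, 4G + hole_size) takes the DHAR branch
 * above and has hole_offset subtracted instead.
 */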
585 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
586 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
587 * for node interleaving.
589 static int num_node_interleave_bits(unsigned intlv_en)
591 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
594 BUG_ON(intlv_en > 7);
595 n = intlv_shift_table[intlv_en];
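/*
 * From the table above: the valid non-zero IntlvEn encodings 0x1, 0x3 and
 * 0x7 (cf. the sanity check in find_mc_by_sys_addr()) map to 1, 2 and 3
 * interleave bits, i.e. 2-, 4- and 8-node interleaving respectively.
 */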
599 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
600 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
602 struct amd64_pvt *pvt;
609 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
610 * concerning translating a DramAddr to an InputAddr.
612 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
613 input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
616 debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
617 intlv_shift, (unsigned long)dram_addr,
618 (unsigned long)input_addr);
624 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
625 * assumed that @sys_addr maps to the node given by mci.
627 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
632 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
634 debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
635 (unsigned long)sys_addr, (unsigned long)input_addr);
642 * @input_addr is an InputAddr associated with the node represented by mci.
643 * Translate @input_addr to a DramAddr and return the result.
645 static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
647 struct amd64_pvt *pvt;
648 int node_id, intlv_shift;
653 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
654 * shows how to translate a DramAddr to an InputAddr. Here we reverse
655 * this procedure. When translating from a DramAddr to an InputAddr, the
656 * bits used for node interleaving are discarded. Here we recover these
657 * bits from the IntlvSel field of the DRAM Limit register (section
658 * 3.4.4.2) for the node that input_addr is associated with.
661 node_id = pvt->mc_node_id;
662 BUG_ON((node_id < 0) || (node_id > 7));
664 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
666 if (intlv_shift == 0) {
667 debugf1(" InputAddr 0x%lx translates to DramAddr of "
668 "same value\n", (unsigned long)input_addr);
673 bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
674 (input_addr & 0xfff);
676 intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
677 dram_addr = bits + (intlv_sel << 12);
679 debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
680 "(%d node interleave bits)\n", (unsigned long)input_addr,
681 (unsigned long)dram_addr, intlv_shift);
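/*
 * Worked example (hypothetical, 2-node interleave => intlv_shift == 1):
 * for input_addr == 0x5000 and intlv_sel == 1, the interleaved bits are
 * shifted back up and the select bit is re-inserted at bit 12:
 *
 *	bits      = ((0x5000 & GENMASK(12, 35)) << 1) + (0x5000 & 0xfff)
 *	          = 0xa000
 *	dram_addr = 0xa000 + (1 << 12) = 0xb000
 */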
687 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
688 * @dram_addr to a SysAddr.
690 static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
692 struct amd64_pvt *pvt = mci->pvt_info;
693 u64 hole_base, hole_offset, hole_size, base, sys_addr;
696 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
699 if ((dram_addr >= hole_base) &&
700 (dram_addr < (hole_base + hole_size))) {
701 sys_addr = dram_addr + hole_offset;
703 debugf1("using DHAR to translate DramAddr 0x%lx to "
704 "SysAddr 0x%lx\n", (unsigned long)dram_addr,
705 (unsigned long)sys_addr);
711 base = get_dram_base(pvt, pvt->mc_node_id);
712 sys_addr = dram_addr + base;
715 * The sys_addr we have computed up to this point is a 40-bit value
716 * because the k8 deals with 40-bit values. However, the value we are
717 * supposed to return is a full 64-bit physical address. The AMD
718 * x86-64 architecture specifies that the most significant implemented
719 * address bit through bit 63 of a physical address must be either all
720 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
721 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
722 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application Programming.
725 sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
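/*
 * Example: if bit 39 is set, say sys_addr == 0x8012345678, then
 * ~((sys_addr & (1ull << 39)) - 1) == 0xffffff8000000000 and the OR yields
 * 0xffffff8012345678; if bit 39 is clear the term is ~(0 - 1) == 0 and
 * sys_addr is returned unchanged.
 */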
727 debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
728 pvt->mc_node_id, (unsigned long)dram_addr,
729 (unsigned long)sys_addr);
735 * @input_addr is an InputAddr associated with the node given by mci. Translate
736 * @input_addr to a SysAddr.
738 static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
741 return dram_addr_to_sys_addr(mci,
742 input_addr_to_dram_addr(mci, input_addr));
746 * Find the minimum and maximum InputAddr values that map to the given @csrow.
747 * Pass back these values in *input_addr_min and *input_addr_max.
749 static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
750 u64 *input_addr_min, u64 *input_addr_max)
752 struct amd64_pvt *pvt;
756 BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
758 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
760 *input_addr_min = base & ~mask;
761 *input_addr_max = base | mask;
764 /* Map the Error address to a PAGE and PAGE OFFSET. */
765 static inline void error_address_to_page_and_offset(u64 error_address,
766 u32 *page, u32 *offset)
768 *page = (u32) (error_address >> PAGE_SHIFT);
769 *offset = ((u32) error_address) & ~PAGE_MASK;
773 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
774 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
775 * of a node that detected an ECC memory error. mci represents the node that
776 * the error address maps to (possibly different from the node that detected
777 * the error). Return the number of the csrow that sys_addr maps to, or -1 on failure.
780 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
784 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
787 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
788 "address 0x%lx\n", (unsigned long)sys_addr);
792 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
795 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs are ECC capable.
798 static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
801 enum edac_type edac_cap = EDAC_FLAG_NONE;
803 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
807 if (pvt->dclr0 & BIT(bit))
808 edac_cap = EDAC_FLAG_SECDED;
814 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
816 static void amd64_dump_dramcfg_low(u32 dclr, int chan)
818 debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
820 debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
821 (dclr & BIT(16)) ? "un" : "",
822 (dclr & BIT(19)) ? "yes" : "no");
824 debugf1(" PAR/ERR parity: %s\n",
825 (dclr & BIT(8)) ? "enabled" : "disabled");
827 if (boot_cpu_data.x86 == 0x10)
828 debugf1(" DCT 128bit mode width: %s\n",
829 (dclr & BIT(11)) ? "128b" : "64b");
831 debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
832 (dclr & BIT(12)) ? "yes" : "no",
833 (dclr & BIT(13)) ? "yes" : "no",
834 (dclr & BIT(14)) ? "yes" : "no",
835 (dclr & BIT(15)) ? "yes" : "no");
838 /* Display and decode various NB registers for debug purposes. */
839 static void dump_misc_regs(struct amd64_pvt *pvt)
841 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
843 debugf1(" NB two channel DRAM capable: %s\n",
844 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
846 debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
847 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
848 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
850 amd64_dump_dramcfg_low(pvt->dclr0, 0);
852 debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
854 debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
856 pvt->dhar, dhar_base(pvt),
857 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
858 : f10_dhar_offset(pvt));
860 debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
862 amd64_debug_display_dimm_sizes(0, pvt);
864 /* everything below this point is Fam10h and above */
865 if (boot_cpu_data.x86 == 0xf)
868 amd64_debug_display_dimm_sizes(1, pvt);
870 amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
872 /* Only if NOT ganged does dclr1 have valid info */
873 if (!dct_ganging_enabled(pvt))
874 amd64_dump_dramcfg_low(pvt->dclr1, 1);
878 * see BKDG, F2x[1,0][5C:40], F2x[1,0][6C:60]
880 static void prep_chip_selects(struct amd64_pvt *pvt)
882 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
883 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
884 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
886 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
887 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
892 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
894 static void read_dct_base_mask(struct amd64_pvt *pvt)
898 prep_chip_selects(pvt);
900 for_each_chip_select(cs, 0, pvt) {
901 u32 reg0 = DCSB0 + (cs * 4);
902 u32 reg1 = DCSB1 + (cs * 4);
903 u32 *base0 = &pvt->csels[0].csbases[cs];
904 u32 *base1 = &pvt->csels[1].csbases[cs];
906 if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
907 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
910 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
913 if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
914 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
918 for_each_chip_select_mask(cs, 0, pvt) {
919 u32 reg0 = DCSM0 + (cs * 4);
920 u32 reg1 = DCSM1 + (cs * 4);
921 u32 *mask0 = &pvt->csels[0].csmasks[cs];
922 u32 *mask1 = &pvt->csels[1].csmasks[cs];
924 if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
925 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
928 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
931 if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
932 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
937 static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
941 /* F15h supports only DDR3 */
942 if (boot_cpu_data.x86 >= 0x15)
943 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
944 else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
945 if (pvt->dchr0 & DDR3_MODE)
946 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
948 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
950 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
953 amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);
958 /* Get the number of DCT channels the memory controller is using. */
959 static int k8_early_channel_count(struct amd64_pvt *pvt)
963 if (pvt->ext_model >= K8_REV_F)
964 /* RevF (NPT) and later */
965 flag = pvt->dclr0 & F10_WIDTH_128;
967 /* RevE and earlier */
968 flag = pvt->dclr0 & REVE_WIDTH_128;
973 return (flag) ? 2 : 1;
976 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
977 static u64 get_error_address(struct mce *m)
982 if (boot_cpu_data.x86 == 0xf) {
987 return m->addr & GENMASK(start_bit, end_bit);
990 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
992 u32 off = range << 3;
994 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
995 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
997 if (boot_cpu_data.x86 == 0xf)
1000 if (!dram_rw(pvt, range))
1003 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
1004 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1007 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1010 struct mem_ctl_info *src_mci;
1011 struct amd64_pvt *pvt = mci->pvt_info;
1015 /* CHIPKILL enabled */
1016 if (pvt->nbcfg & NBCFG_CHIPKILL) {
1017 channel = get_channel_from_ecc_syndrome(mci, syndrome);
1020 * Syndrome didn't map, so we don't know which of the
1021 * 2 DIMMs is in error. So we need to ID 'both' of them
1024 amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
1025 "error reporting race\n", syndrome);
1026 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1031 * non-chipkill ecc mode
1033 * The k8 documentation is unclear about how to determine the
1034 * channel number when using non-chipkill memory. This method
1035 * was obtained from email communication with someone at AMD.
1036 * (Wish the email was placed in this comment - norsk)
1038 channel = ((sys_addr & BIT(3)) != 0);
1042 * Find out which node the error address belongs to. This may be
1043 * different from the node that detected the error.
1045 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1047 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1048 (unsigned long)sys_addr);
1049 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1053 /* Now map the sys_addr to a CSROW */
1054 csrow = sys_addr_to_csrow(src_mci, sys_addr);
1056 edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
1058 error_address_to_page_and_offset(sys_addr, &page, &offset);
1060 edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
1061 channel, EDAC_MOD_STR);
1065 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
1069 if (pvt->ext_model >= K8_REV_F)
1070 dbam_map = ddr2_dbam;
1071 else if (pvt->ext_model >= K8_REV_D)
1072 dbam_map = ddr2_dbam_revD;
1074 dbam_map = ddr2_dbam_revCG;
1076 return dbam_map[cs_mode];
1080 * Get the number of DCT channels in use.
1083 * number of Memory Channels in operation
1085 * contents of the DCL0_LOW register
1087 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1089 int i, j, channels = 0;
1091 /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1092 if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & F10_WIDTH_128))
1096 * Need to check if in unganged mode: In such, there are 2 channels,
1097 * but they are not in 128 bit mode and thus the above 'dclr0' status bit will be OFF.
1100 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1101 * their CSEnable bit on. If so, then SINGLE DIMM case.
1103 debugf0("Data width is not 128 bits - need more decoding\n");
1106 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1107 * is more than just one DIMM present in unganged mode. Need to check
1108 * both controllers since DIMMs can be placed in either one.
1110 for (i = 0; i < 2; i++) {
1111 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1113 for (j = 0; j < 4; j++) {
1114 if (DBAM_DIMM(j, dbam) > 0) {
1124 amd64_info("MCT channel count: %d\n", channels);
1129 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
1133 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1134 dbam_map = ddr3_dbam;
1136 dbam_map = ddr2_dbam;
1138 return dbam_map[cs_mode];
1141 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1144 if (boot_cpu_data.x86 == 0xf)
1147 if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1148 debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1149 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1151 debugf0(" DCTs operate in %s mode.\n",
1152 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1154 if (!dct_ganging_enabled(pvt))
1155 debugf0(" Address range split per DCT: %s\n",
1156 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1158 debugf0(" data interleave for ECC: %s, "
1159 "DRAM cleared since last warm reset: %s\n",
1160 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1161 (dct_memory_cleared(pvt) ? "yes" : "no"));
1163 debugf0(" channel interleave: %s, "
1164 "interleave bits selector: 0x%x\n",
1165 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1166 dct_sel_interleave_addr(pvt));
1169 amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
1173 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1174 * Interleaving Modes.
1176 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1177 bool hi_range_sel, u8 intlv_en)
1179 u32 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1181 if (dct_ganging_enabled(pvt))
1185 return dct_sel_high;
1188 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1190 if (dct_interleave_enabled(pvt)) {
1191 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1193 /* return DCT select function: 0=DCT0, 1=DCT1 */
1195 return sys_addr >> 6 & 1;
1197 if (intlv_addr & 0x2) {
1198 u8 shift = intlv_addr & 0x1 ? 9 : 6;
1199 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1201 return ((sys_addr >> shift) & 1) ^ temp;
1204 return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1207 if (dct_high_range_enabled(pvt))
1208 return ~dct_sel_high & 1;
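/*
 * Worked example of the XOR-hash branch above (hypothetical address): with
 * intlv_addr == 0x2 the shift is 6, so for sys_addr == 0x50040
 *
 *	temp    = hweight((0x50040 >> 16) & 0x1f) % 2 = hweight(5) % 2 = 0
 *	channel = ((0x50040 >> 6) & 1) ^ temp = 1
 *
 * i.e. address bit 6 selects the DCT, optionally flipped by the parity of
 * address bits [20:16].
 */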
1213 /* Convert the sys_addr to the normalized DCT address */
1214 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, int range,
1215 u64 sys_addr, bool hi_rng,
1216 u32 dct_sel_base_addr)
1219 u64 dram_base = get_dram_base(pvt, range);
1220 u64 hole_off = f10_dhar_offset(pvt);
1221 u32 hole_valid = dhar_valid(pvt);
1222 u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1227 * base address of high range is below 4Gb
1228 * (bits [47:27] at [31:11])
1229 * DRAM address space on this DCT is hoisted above 4Gb &&
1232 * remove hole offset from sys_addr
1234 * remove high range offset from sys_addr
1236 if ((!(dct_sel_base_addr >> 16) ||
1237 dct_sel_base_addr < dhar_base(pvt)) &&
1239 (sys_addr >= BIT_64(32)))
1240 chan_off = hole_off;
1242 chan_off = dct_sel_base_off;
1246 * we have a valid hole &&
1251 * remove dram base to normalize to DCT address
1253 if (hole_valid && (sys_addr >= BIT_64(32)))
1254 chan_off = hole_off;
1256 chan_off = dram_base;
1259 return (sys_addr & GENMASK(6, 47)) - (chan_off & GENMASK(23, 47));
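/*
 * Note: GENMASK() in this driver takes (lo, hi) bit positions, so the
 * return above keeps sys_addr bits [47:6] and subtracts the offset at the
 * 8M granularity of chan_off bits [47:23].
 */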
1263 * checks if the csrow passed in is marked as SPARED; if so, returns the new spare row.
1266 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1270 if (online_spare_swap_done(pvt, dct) &&
1271 csrow == online_spare_bad_dramcs(pvt, dct)) {
1273 for_each_chip_select(tmp_cs, dct, pvt) {
1274 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1284 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1285 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1288 * -EINVAL: NOT FOUND
1289 * 0..csrow = Chip-Select Row
1291 static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
1293 struct mem_ctl_info *mci;
1294 struct amd64_pvt *pvt;
1295 u64 cs_base, cs_mask;
1296 int cs_found = -EINVAL;
1303 pvt = mci->pvt_info;
1305 debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1307 for_each_chip_select(csrow, dct, pvt) {
1308 if (!csrow_enabled(csrow, dct, pvt))
1311 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1313 debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1314 csrow, cs_base, cs_mask);
1318 debugf1(" (InputAddr & ~CSMask)=0x%llx "
1319 "(CSBase & ~CSMask)=0x%llx\n",
1320 (in_addr & cs_mask), (cs_base & cs_mask));
1322 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1323 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1325 debugf1(" MATCH csrow=%d\n", cs_found);
1333 * See F2x10C. Non-interleaved graphics framebuffer memory below 16G is
1334 * swapped with a region located at the bottom of memory so that the GPU can use
1335 * the interleaved region and thus two channels.
1337 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1339 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1341 if (boot_cpu_data.x86 == 0x10) {
1342 /* only revC3 and revE have that feature */
1343 if (boot_cpu_data.x86_model < 4 ||
1344 (boot_cpu_data.x86_model < 0xa &&
1345 boot_cpu_data.x86_mask < 3))
1349 amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
1351 if (!(swap_reg & 0x1))
1354 swap_base = (swap_reg >> 3) & 0x7f;
1355 swap_limit = (swap_reg >> 11) & 0x7f;
1356 rgn_size = (swap_reg >> 20) & 0x7f;
1357 tmp_addr = sys_addr >> 27;
1359 if (!(sys_addr >> 34) &&
1360 (((tmp_addr >= swap_base) &&
1361 (tmp_addr <= swap_limit)) ||
1362 (tmp_addr < rgn_size)))
1363 return sys_addr ^ (u64)swap_base << 27;
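/*
 * The XOR works because swap base, limit and region size are all kept in
 * 128M (1 << 27) units, see the >> 27 shifts above.  Hypothetical example:
 * with swap_base == 0x02, A ^ (0x02ull << 27) flips bit 28 of the address,
 * exchanging the 128M-aligned region at 0x10000000 with the one at the
 * bottom of memory.
 */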
1368 /* For a given @dram_range, check if @sys_addr falls within it. */
1369 static int f1x_match_to_this_node(struct amd64_pvt *pvt, int range,
1370 u64 sys_addr, int *nid, int *chan_sel)
1372 int cs_found = -EINVAL;
1376 bool high_range = false;
1378 u8 node_id = dram_dst_node(pvt, range);
1379 u8 intlv_en = dram_intlv_en(pvt, range);
1380 u32 intlv_sel = dram_intlv_sel(pvt, range);
1382 debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1383 range, sys_addr, get_dram_limit(pvt, range));
1385 if (dhar_valid(pvt) &&
1386 dhar_base(pvt) <= sys_addr &&
1387 sys_addr < BIT_64(32)) {
1388 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1394 (intlv_sel != ((sys_addr >> 12) & intlv_en))) {
1395 amd64_warn("Botched intlv bits, en: 0x%x, sel: 0x%x\n",
1396 intlv_en, intlv_sel);
1400 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1402 dct_sel_base = dct_sel_baseaddr(pvt);
1405 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1406 * select between DCT0 and DCT1.
1408 if (dct_high_range_enabled(pvt) &&
1409 !dct_ganging_enabled(pvt) &&
1410 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1413 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1415 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1416 high_range, dct_sel_base);
1418 /* Remove node interleaving, see F1x120 */
1420 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1421 (chan_addr & 0xfff);
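/*
 * Worked example (hypothetical): with 2-node interleaving (intlv_en ==
 * 0x1, so hweight8(intlv_en) == 1), chan_addr == 0x12345 becomes
 *
 *	((0x12345 >> 13) << 12) | (0x12345 & 0xfff) == 0x9345
 *
 * i.e. the single interleave bit at position 12 is squeezed out.
 */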
1423 /* remove channel interleave */
1424 if (dct_interleave_enabled(pvt) &&
1425 !dct_high_range_enabled(pvt) &&
1426 !dct_ganging_enabled(pvt)) {
1428 if (dct_sel_interleave_addr(pvt) != 1) {
1429 if (dct_sel_interleave_addr(pvt) == 0x3)
1431 chan_addr = ((chan_addr >> 10) << 9) |
1432 (chan_addr & 0x1ff);
1434 /* A[6] or hash 6 */
1435 chan_addr = ((chan_addr >> 7) << 6) |
1439 chan_addr = ((chan_addr >> 13) << 12) |
1440 (chan_addr & 0xfff);
1443 debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr);
1445 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1447 if (cs_found >= 0) {
1449 *chan_sel = channel;
1454 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1455 int *node, int *chan_sel)
1457 int range, cs_found = -EINVAL;
1459 for (range = 0; range < DRAM_RANGES; range++) {
1461 if (!dram_rw(pvt, range))
1464 if ((get_dram_base(pvt, range) <= sys_addr) &&
1465 (get_dram_limit(pvt, range) >= sys_addr)) {
1467 cs_found = f1x_match_to_this_node(pvt, range,
1478 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1479 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1481 * The @sys_addr is usually an error address received from the hardware
1484 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1487 struct amd64_pvt *pvt = mci->pvt_info;
1489 int nid, csrow, chan = 0;
1491 csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1494 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1498 error_address_to_page_and_offset(sys_addr, &page, &offset);
1501 * We need the syndromes for channel detection only when we're
1502 * ganged. Otherwise @chan should already contain the channel at this point.
1505 if (dct_ganging_enabled(pvt))
1506 chan = get_channel_from_ecc_syndrome(mci, syndrome);
1509 edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
1513 * Channel unknown, report all channels on this CSROW as failed.
1515 for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
1516 edac_mc_handle_ce(mci, page, offset, syndrome,
1517 csrow, chan, EDAC_MOD_STR);
1521 * debug routine to display the memory sizes of all logical DIMMs and their CSROWs.
1524 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
1526 int dimm, size0, size1, factor = 0;
1527 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1528 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1530 if (boot_cpu_data.x86 == 0xf) {
1531 if (pvt->dclr0 & F10_WIDTH_128)
1534 /* K8 families < revF not supported yet */
1535 if (pvt->ext_model < K8_REV_F)
1541 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
1542 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
1543 : pvt->csels[0].csbases;
1545 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
1547 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1549 /* Dump memory sizes for DIMM and its CSROWs */
1550 for (dimm = 0; dimm < 4; dimm++) {
1553 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
1554 size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
1557 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
1558 size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
1560 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1561 dimm * 2, size0 << factor,
1562 dimm * 2 + 1, size1 << factor);
1566 static struct amd64_family_type amd64_family_types[] = {
1569 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1570 .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1572 .early_channel_count = k8_early_channel_count,
1573 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1574 .dbam_to_cs = k8_dbam_to_chip_select,
1575 .read_dct_pci_cfg = k8_read_dct_pci_cfg,
1580 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1581 .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1583 .early_channel_count = f1x_early_channel_count,
1584 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1585 .dbam_to_cs = f10_dbam_to_chip_select,
1586 .read_dct_pci_cfg = f10_read_dct_pci_cfg,
1592 .early_channel_count = f1x_early_channel_count,
1593 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1594 .read_dct_pci_cfg = f15_read_dct_pci_cfg,
1599 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1600 unsigned int device,
1601 struct pci_dev *related)
1603 struct pci_dev *dev = NULL;
1605 dev = pci_get_device(vendor, device, dev);
1607 if ((dev->bus->number == related->bus->number) &&
1608 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1610 dev = pci_get_device(vendor, device, dev);
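/*
 * Illustration: each K8/F10h node shows up as one PCI slot with several
 * functions, e.g. 00:18.1 (F1, address map), 00:18.2 (F2, DRAM controller)
 * and 00:18.3 (F3, misc).  Starting from the F2 device, this helper finds
 * the sibling function with the requested device id on the same bus/slot.
 */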
1617 * These are tables of eigenvectors (one per line) which can be used for the
1618 * construction of the syndrome tables. The modified syndrome search algorithm
1619 * uses those to find the symbol in error and thus the DIMM.
1621 * Algorithm courtesy of Ross LaFetra from AMD.
1623 static u16 x4_vectors[] = {
1624 0x2f57, 0x1afe, 0x66cc, 0xdd88,
1625 0x11eb, 0x3396, 0x7f4c, 0xeac8,
1626 0x0001, 0x0002, 0x0004, 0x0008,
1627 0x1013, 0x3032, 0x4044, 0x8088,
1628 0x106b, 0x30d6, 0x70fc, 0xe0a8,
1629 0x4857, 0xc4fe, 0x13cc, 0x3288,
1630 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1631 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1632 0x15c1, 0x2a42, 0x89ac, 0x4758,
1633 0x2b03, 0x1602, 0x4f0c, 0xca08,
1634 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1635 0x8ba7, 0x465e, 0x244c, 0x1cc8,
1636 0x2b87, 0x164e, 0x642c, 0xdc18,
1637 0x40b9, 0x80de, 0x1094, 0x20e8,
1638 0x27db, 0x1eb6, 0x9dac, 0x7b58,
1639 0x11c1, 0x2242, 0x84ac, 0x4c58,
1640 0x1be5, 0x2d7a, 0x5e34, 0xa718,
1641 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1642 0x4c97, 0xc87e, 0x11fc, 0x33a8,
1643 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1644 0x16b3, 0x3d62, 0x4f34, 0x8518,
1645 0x1e2f, 0x391a, 0x5cac, 0xf858,
1646 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1647 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1648 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1649 0x4397, 0xc27e, 0x17fc, 0x3ea8,
1650 0x1617, 0x3d3e, 0x6464, 0xb8b8,
1651 0x23ff, 0x12aa, 0xab6c, 0x56d8,
1652 0x2dfb, 0x1ba6, 0x913c, 0x7328,
1653 0x185d, 0x2ca6, 0x7914, 0x9e28,
1654 0x171b, 0x3e36, 0x7d7c, 0xebe8,
1655 0x4199, 0x82ee, 0x19f4, 0x2e58,
1656 0x4807, 0xc40e, 0x130c, 0x3208,
1657 0x1905, 0x2e0a, 0x5804, 0xac08,
1658 0x213f, 0x132a, 0xadfc, 0x5ba8,
1659 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1662 static u16 x8_vectors[] = {
1663 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1664 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1665 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1666 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1667 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1668 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1669 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1670 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1671 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1672 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1673 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1674 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1675 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1676 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1677 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1678 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1679 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1680 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1681 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
1684 static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
1687 unsigned int i, err_sym;
1689 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1691 int v_idx = err_sym * v_dim;
1692 int v_end = (err_sym + 1) * v_dim;
1694 /* walk over all 16 bits of the syndrome */
1695 for (i = 1; i < (1U << 16); i <<= 1) {
1697 /* if bit is set in that eigenvector... */
1698 if (v_idx < v_end && vectors[v_idx] & i) {
1699 u16 ev_comp = vectors[v_idx++];
1701 /* ... and bit set in the modified syndrome, */
1711 /* can't get to zero, move to next symbol */
1716 debugf0("syndrome(%x) not found\n", syndrome);
1720 static int map_err_sym_to_channel(int err_sym, int sym_size)
1733 return err_sym >> 4;
1739 /* imaginary bits not in a DIMM */
1741 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
1753 return err_sym >> 3;
1759 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1761 struct amd64_pvt *pvt = mci->pvt_info;
1764 if (pvt->syn_type == 8)
1765 err_sym = decode_syndrome(syndrome, x8_vectors,
1766 ARRAY_SIZE(x8_vectors),
1768 else if (pvt->syn_type == 4)
1769 err_sym = decode_syndrome(syndrome, x4_vectors,
1770 ARRAY_SIZE(x4_vectors),
1773 amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);
1777 return map_err_sym_to_channel(err_sym, pvt->syn_type);
1781 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
1782 * ADDRESS and process.
1784 static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
1786 struct amd64_pvt *pvt = mci->pvt_info;
1790 /* Ensure that the Error Address is VALID */
1791 if (!(m->status & MCI_STATUS_ADDRV)) {
1792 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1793 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1797 sys_addr = get_error_address(m);
1798 syndrome = extract_syndrome(m->status);
1800 amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
1802 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);
1805 /* Handle any Un-correctable Errors (UEs) */
1806 static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
1808 struct mem_ctl_info *log_mci, *src_mci = NULL;
1815 if (!(m->status & MCI_STATUS_ADDRV)) {
1816 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1817 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
1821 sys_addr = get_error_address(m);
1824 * Find out which node the error address belongs to. This may be
1825 * different from the node that detected the error.
1827 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1829 amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
1830 (unsigned long)sys_addr);
1831 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
1837 csrow = sys_addr_to_csrow(log_mci, sys_addr);
1839 amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
1840 (unsigned long)sys_addr);
1841 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
1843 error_address_to_page_and_offset(sys_addr, &page, &offset);
1844 edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
1848 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
1851 u16 ec = EC(m->status);
1852 u8 xec = XEC(m->status, 0x1f);
1853 u8 ecc_type = (m->status >> 45) & 0x3;
1855 /* Bail out early if this was an 'observed' error */
1856 if (PP(ec) == NBSL_PP_OBS)
1859 /* Do only ECC errors */
1860 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
1864 amd64_handle_ce(mci, m);
1865 else if (ecc_type == 1)
1866 amd64_handle_ue(mci, m);
1869 void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
1871 struct mem_ctl_info *mci = mcis[node_id];
1873 __amd64_decode_bus_error(mci, m);
1877 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
1878 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
1880 static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
1882 /* Reserve the ADDRESS MAP Device */
1883 pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
1885 amd64_err("error address map device not found: "
1886 "vendor %x device 0x%x (broken BIOS?)\n",
1887 PCI_VENDOR_ID_AMD, f1_id);
1891 /* Reserve the MISC Device */
1892 pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
1894 pci_dev_put(pvt->F1);
1897 amd64_err("error F3 device not found: "
1898 "vendor %x device 0x%x (broken BIOS?)\n",
1899 PCI_VENDOR_ID_AMD, f3_id);
1903 debugf1("F1: %s\n", pci_name(pvt->F1));
1904 debugf1("F2: %s\n", pci_name(pvt->F2));
1905 debugf1("F3: %s\n", pci_name(pvt->F3));
1910 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
1912 pci_dev_put(pvt->F1);
1913 pci_dev_put(pvt->F3);
1917 * Retrieve the hardware registers of the memory controller (this includes the
1918 * 'Address Map' and 'Misc' device regs)
1920 static void read_mc_regs(struct amd64_pvt *pvt)
1927 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
1928 * those are Read-As-Zero
1930 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
1931 debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);
1933 /* check first whether TOP_MEM2 is enabled */
1934 rdmsrl(MSR_K8_SYSCFG, msr_val);
1935 if (msr_val & (1U << 21)) {
1936 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
1937 debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
1939 debugf0(" TOP_MEM2 disabled.\n");
1941 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
1943 read_dram_ctl_register(pvt);
1945 for (range = 0; range < DRAM_RANGES; range++) {
1948 /* read settings for this DRAM range */
1949 read_dram_base_limit_regs(pvt, range);
1951 rw = dram_rw(pvt, range);
1955 debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
1957 get_dram_base(pvt, range),
1958 get_dram_limit(pvt, range));
1960 debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
1961 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
1962 (rw & 0x1) ? "R" : "-",
1963 (rw & 0x2) ? "W" : "-",
1964 dram_intlv_sel(pvt, range),
1965 dram_dst_node(pvt, range));
1968 read_dct_base_mask(pvt);
1970 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
1971 amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
1973 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
1975 amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
1976 amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
1978 if (!dct_ganging_enabled(pvt)) {
1979 amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
1980 amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
1983 if (boot_cpu_data.x86 >= 0x10) {
1984 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
1985 amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
1988 if (boot_cpu_data.x86 == 0x10 &&
1989 boot_cpu_data.x86_model > 7 &&
1990 /* F3x180[EccSymbolSize]=1 => x8 symbols */
1996 dump_misc_regs(pvt);
2000 * NOTE: CPU Revision Dependent code
2003 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2004 * k8 private pointer to -->
2005 * DRAM Bank Address mapping register
2007 * DCL register where dual_channel_active is
2009 * The DBAM register consists of 4 sets of 4 bits each, defined as:
2012 * 0-3 CSROWs 0 and 1
2013 * 4-7 CSROWs 2 and 3
2014 * 8-11 CSROWs 4 and 5
2015 * 12-15 CSROWs 6 and 7
2017 * Values range from: 0 to 15
2018 * The meaning of the values depends on CPU revision and dual-channel state,
2019 * see the relevant BKDG for more info.
2021 * The memory controller provides for total of only 8 CSROWs in its current
2022 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2023 * single channel or two (2) DIMMs in dual channel mode.
2025 * The following code logic collapses the various tables for CSROW based on CPU revision.
2029 * Returns the number of PAGE_SIZE pages that the specified CSROW encompasses.
2033 static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
2035 u32 cs_mode, nr_pages;
2038 * The math on this doesn't look right on the surface because x/2*4 can
2039 * be simplified to x*2 but this expression makes use of the fact that
2040 * it is integral math where 1/2=0. This intermediate value becomes the
2041 * number of bits to shift the DBAM register to extract the proper CSROW field.
2044 cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
2046 nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
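/*
 * Worked example: for csrow_nr == 5 the shift is (5/2)*4 == 8, selecting
 * DBAM bits 8-11 -- the "CSROWs 4 and 5" nibble in the table above.  If
 * dbam_to_cs() were to return 512 (MB), nr_pages would be
 * 512 << (20 - PAGE_SHIFT) == 131072 4K pages, before the dual-channel
 * doubling below.
 */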
2049 * If dual channel then double the memory size of single channel.
2050 * Channel count is 1 or 2
2052 nr_pages <<= (pvt->channel_count - 1);
2054 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
2055 debugf0(" nr_pages= %u channel-count = %d\n",
2056 nr_pages, pvt->channel_count);
2062 * Initialize the array of csrow attribute instances, based on the values
2063 * from pci config hardware registers.
2065 static int init_csrows(struct mem_ctl_info *mci)
2067 struct csrow_info *csrow;
2068 struct amd64_pvt *pvt = mci->pvt_info;
2069 u64 input_addr_min, input_addr_max, sys_addr, base, mask;
2073 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2077 debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2078 pvt->mc_node_id, val,
2079 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2081 for_each_chip_select(i, 0, pvt) {
2082 csrow = &mci->csrows[i];
2084 if (!csrow_enabled(i, 0, pvt)) {
2085 debugf1("----CSROW %d EMPTY for node %d\n", i,
2090 debugf1("----CSROW %d VALID for MC node %d\n",
2091 i, pvt->mc_node_id);
2094 csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
2095 find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
2096 sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
2097 csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
2098 sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
2099 csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
2101 get_cs_base_and_mask(pvt, i, 0, &base, &mask);
2102 csrow->page_mask = ~mask;
2103 /* 8 bytes of resolution */
2105 csrow->mtype = amd64_determine_memory_type(pvt, i);
2107 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2108 debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
2109 (unsigned long)input_addr_min,
2110 (unsigned long)input_addr_max);
2111 debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
2112 (unsigned long)sys_addr, csrow->page_mask);
2113 debugf1(" nr_pages: %u first_page: 0x%lx "
2114 "last_page: 0x%lx\n",
2115 (unsigned)csrow->nr_pages,
2116 csrow->first_page, csrow->last_page);
2119 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2121 if (pvt->nbcfg & NBCFG_ECC_ENABLE)
2123 (pvt->nbcfg & NBCFG_CHIPKILL) ?
2124 EDAC_S4ECD4ED : EDAC_SECDED;
2126 csrow->edac_mode = EDAC_NONE;
2132 /* get all cores on this DCT */
2133 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
2137 for_each_online_cpu(cpu)
2138 if (amd_get_nb_id(cpu) == nid)
2139 cpumask_set_cpu(cpu, mask);
2142 /* check MCG_CTL on all the cpus on this node */
2143 static bool amd64_nb_mce_bank_enabled_on_node(int nid)
2149 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2150 amd64_warn("%s: Error allocating mask\n", __func__);
2154 get_cpus_on_this_dct_cpumask(mask, nid);
2156 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2158 for_each_cpu(cpu, mask) {
2159 struct msr *reg = per_cpu_ptr(msrs, cpu);
2160 nbe = reg->l & MSR_MCGCTL_NBE;
2162 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2164 (nbe ? "enabled" : "disabled"));
2172 free_cpumask_var(mask);
2176 static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
2178 cpumask_var_t cmask;
2181 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2182 amd64_warn("%s: error allocating mask\n", __func__);
2186 get_cpus_on_this_dct_cpumask(cmask, nid);
2188 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2190 for_each_cpu(cpu, cmask) {
2192 struct msr *reg = per_cpu_ptr(msrs, cpu);
2195 if (reg->l & MSR_MCGCTL_NBE)
2196 s->flags.nb_mce_enable = 1;
2198 reg->l |= MSR_MCGCTL_NBE;
2201 * Turn off NB MCE reporting only when it was off before
2203 if (!s->flags.nb_mce_enable)
2204 reg->l &= ~MSR_MCGCTL_NBE;
2207 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2209 free_cpumask_var(cmask);
2214 static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2218 u32 value, mask = 0x3; /* UECC/CECC enable */
2220 if (toggle_ecc_err_reporting(s, nid, ON)) {
2221 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2225 amd64_read_pci_cfg(F3, NBCTL, &value);
2227 s->old_nbctl = value & mask;
2228 s->nbctl_valid = true;
2231 amd64_write_pci_cfg(F3, NBCTL, value);
2233 amd64_read_pci_cfg(F3, NBCFG, &value);
2235 debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2236 nid, value, !!(value & NBCFG_ECC_ENABLE));
2238 if (!(value & NBCFG_ECC_ENABLE)) {
2239 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2241 s->flags.nb_ecc_prev = 0;
2243 /* Attempt to turn on DRAM ECC Enable */
2244 value |= NBCFG_ECC_ENABLE;
2245 amd64_write_pci_cfg(F3, NBCFG, value);
2247 amd64_read_pci_cfg(F3, NBCFG, &value);
2249 if (!(value & NBCFG_ECC_ENABLE)) {
2250 amd64_warn("Hardware rejected DRAM ECC enable, "
2251 "check memory DIMM configuration.\n");
2254 amd64_info("Hardware accepted DRAM ECC Enable\n");
2257 s->flags.nb_ecc_prev = 1;
2260 debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2261 nid, value, !!(value & NBCFG_ECC_ENABLE));
2266 static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2269 u32 value, mask = 0x3; /* UECC/CECC enable */
2272 if (!s->nbctl_valid)
2275 amd64_read_pci_cfg(F3, NBCTL, &value);
2277 value |= s->old_nbctl;
2279 amd64_write_pci_cfg(F3, NBCTL, value);
2281 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
2282 if (!s->flags.nb_ecc_prev) {
2283 amd64_read_pci_cfg(F3, NBCFG, &value);
2284 value &= ~NBCFG_ECC_ENABLE;
2285 amd64_write_pci_cfg(F3, NBCFG, value);
2288 /* restore the NB Enable MCGCTL bit */
2289 if (toggle_ecc_err_reporting(s, nid, OFF))
2290 amd64_warn("Error restoring NB MCGCTL settings!\n");
2294 * EDAC requires that the BIOS have ECC enabled before
2295 * taking over the processing of ECC errors. A command line
2296 * option allows force-enabling hardware ECC later in
2297 * enable_ecc_error_reporting().
2299 static const char *ecc_msg =
2300 "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
2301 " Either enable ECC checking or force module loading by setting "
2302 "'ecc_enable_override'.\n"
2303 " (Note that use of the override may cause unknown side effects.)\n";
static bool ecc_enabled(struct pci_dev *F3, u8 nid)
{
	u32 value;
	bool ecc_en = false;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(F3, NBCFG, &value);

	ecc_en = !!(value & NBCFG_ECC_ENABLE);
	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
	if (!nb_mce_en)
		amd64_notice("NB MCE bank disabled, set MSR "
			     "0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, nid);

	if (!ecc_en || !nb_mce_en) {
		amd64_notice("%s", ecc_msg);
		return false;
	}
	return true;
}
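
/*
 * Illustrative note: the MSR named in the message above is MCG_CTL
 * (MSR_IA32_MCG_CTL == 0x17b) and the NB bit is bit 4, so its current
 * state on, say, cpu 0 can be double-checked from userspace with the
 * msr-tools package:
 *
 *	rdmsr -p 0 0x17b
 *
 * (Flipping the bit by hand with wrmsr is possible, but the BIOS/driver
 * paths above are the intended way to enable it.)
 */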
struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
					  ARRAY_SIZE(amd64_inj_attrs) +
					  1];

struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };

static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
	unsigned int i = 0, j = 0;

	for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
		sysfs_attrs[i] = amd64_dbg_attrs[i];

	if (boot_cpu_data.x86 >= 0x10)
		for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
			sysfs_attrs[i] = amd64_inj_attrs[j];

	sysfs_attrs[i] = terminator;

	mci->mc_driver_sysfs_attributes = sysfs_attrs;
}
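
/*
 * Illustrative note: the table built above is handed to the EDAC core,
 * which exposes each entry as a file in the controller's sysfs directory
 * (typically /sys/devices/system/edac/mc/mc0/); whether the injection
 * files show up depends on the family check above.
 */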
static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= amd64_determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= pvt->ctl_name;
	mci->dev_name		= pci_name(pvt->F2);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}
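
/*
 * Illustrative usage note: with these hooks set, the EDAC core exposes the
 * scrubber through sysfs, so a bandwidth in bytes/sec can be requested with
 * something like
 *
 *	echo 1600000000 > /sys/devices/system/edac/mc/mc0/sdram_scrub_rate
 *
 * which the driver then maps onto a supported scrub-rate setting.
 */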
/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
{
	u8 fam = boot_cpu_data.x86;
	struct amd64_family_type *fam_type = NULL;

	switch (fam) {
	case 0xf:
		fam_type		= &amd64_family_types[K8_CPUS];
		pvt->ops		= &amd64_family_types[K8_CPUS].ops;
		pvt->ctl_name		= fam_type->ctl_name;
		pvt->min_scrubrate	= K8_MIN_SCRUB_RATE_BITS;
		break;

	case 0x10:
		fam_type		= &amd64_family_types[F10_CPUS];
		pvt->ops		= &amd64_family_types[F10_CPUS].ops;
		pvt->ctl_name		= fam_type->ctl_name;
		pvt->min_scrubrate	= F10_MIN_SCRUB_RATE_BITS;
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	pvt->ext_model = boot_cpu_data.x86_model >> 4;

	amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
		   (fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);

	return fam_type;
}
static int amd64_init_one_instance(struct pci_dev *F2)
{
	struct amd64_pvt *pvt = NULL;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	int err = 0, ret;
	u8 nid = get_node_id(F2);

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id	= nid;
	pvt->F2 = F2;

	ret = -EINVAL;
	fam_type = amd64_per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	ret = -ENODEV;
	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
	if (err)
		goto err_free;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;

	ret = -ENOMEM;
	mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
	if (!mci)
		goto err_siblings;

	mci->pvt_info = pvt;
	mci->dev = &pvt->F2->dev;

	setup_mci_misc_attrs(mci);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	set_mc_sysfs_attrs(mci);

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		debugf1("failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(amd64_decode_bus_error);

	mcis[nid] = mci;

	atomic_inc(&drv_instances);

	return 0;

err_add_mc:
	edac_mc_free(mci);
err_siblings:
	free_mc_sibling_devs(pvt);
err_free:
	kfree(pvt);
err_ret:
	return ret;
}
static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
					      const struct pci_device_id *mc_type)
{
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret = 0;

	ret = pci_enable_device(pdev);
	if (ret < 0) {
		debugf0("ret=%d\n", ret);
		return -EIO;
	}

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = -ENODEV;

		if (!ecc_enable_override)
			goto err_enable;

		amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = amd64_init_one_instance(pdev);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);
		restore_ecc_error_reporting(s, nid, F3);
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;
err_out:
	return ret;
}
static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;
	mcis[nid] = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}
/*
 * This table is part of the interface for loading drivers for PCI devices. The
 * PCI core identifies what devices are on a system during boot, and then
 * queries this table to see whether this driver handles a given device.
 */
static const struct pci_device_id amd64_pci_table[] __devinitdata = {
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
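
/*
 * Illustrative note: MODULE_DEVICE_TABLE() exports the IDs above as module
 * aliases (see "modinfo amd64_edac"), of the rough form
 * "pci:v00001022d...sv*sd*bc*sc*i*", which is what lets udev/modprobe
 * auto-load this driver when a matching northbridge function is present.
 */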
static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= amd64_probe_one_instance,
	.remove		= __devexit_p(amd64_remove_one_instance),
	.id_table	= amd64_pci_table,
};
static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (amd64_ctl_pci)
		return;

	mci = mcis[0];
	if (!mci)
		return;

	pvt = mci->pvt_info;
	amd64_ctl_pci =
		edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);

	if (!amd64_ctl_pci) {
		pr_warning("%s(): Unable to create PCI control\n",
			   __func__);

		pr_warning("%s(): PCI error report via EDAC not set\n",
			   __func__);
	}
}
static int __init amd64_edac_init(void)
{
	int err = -ENODEV;

	edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");

	opstate_init();

	if (amd_cache_northbridges() < 0)
		goto err_ret;

	err = -ENOMEM;
	mcis	  = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!(mcis && ecc_stngs))
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		goto err_pci;

	err = -ENODEV;
	if (!atomic_read(&drv_instances))
		goto err_no_instances;

	setup_pci_device();
	return 0;

err_no_instances:
	pci_unregister_driver(&amd64_pci_driver);
err_pci:
	msrs_free(msrs);
	msrs = NULL;
err_free:
	kfree(mcis);
	mcis = NULL;
	kfree(ecc_stngs);
	ecc_stngs = NULL;
err_ret:
	return err;
}
static void __exit amd64_edac_exit(void)
{
	if (amd64_ctl_pci)
		edac_pci_release_generic_ctl(amd64_ctl_pci);

	pci_unregister_driver(&amd64_pci_driver);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	kfree(mcis);
	mcis = NULL;

	msrs_free(msrs);
	msrs = NULL;
}
module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
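
/*
 * Illustrative usage note, pulling together the module parameters declared
 * in this file; a typical manual load might be
 *
 *	modprobe amd64_edac report_gart_errors=1 edac_op_state=0
 *
 * with ecc_enable_override=1 added only when the BIOS leaves DRAM ECC
 * disabled (see ecc_msg above for the caveat).
 */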