1 #include "amd64_edac.h"
4 static struct edac_pci_ctl_info *amd64_ctl_pci;
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
10 * Set by command line parameter. If the BIOS has enabled ECC, this override is
11 * cleared to prevent re-enabling the hardware by this driver.
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
16 /* Lookup table for all possible MC control instances */
18 static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
19 static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
22 * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
23 * for DDR2 DRAM mapping.
25 u32 revf_quad_ddr2_shift[] = {
26 0, /* 0000b NULL DIMM (128MB) */
45 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
46 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
49 * FIXME: Produce a better mapping/linearisation.
52 struct scrubrate scrubrates[] = {
53 { 0x01, 1600000000UL},
75 { 0x00, 0UL}, /* scrubbing off */
79 * Memory scrubber control interface. For K8, memory scrubbing is handled by
80 * hardware and can involve L2 cache, dcache as well as the main memory. With
81 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
84 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
85 * (dram) to cache lines. This is nasty, so we will use bandwidth in
86 * bytes/sec for the setting.
88 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
89 * other archs, we might not have access to the caches directly.
93 * scan the scrub rate mapping table for a close or matching bandwidth value to
94 * issue. If the requested rate is too big, use the last maximum value found.
96 static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
103 * map the configured rate (new_bw) to a value specific to the AMD64
104 * memory controller and apply it to the register. Search for the first
105 * bandwidth entry that is greater than or equal to the requested setting
106 * and program that. If at the last entry, turn off DRAM scrubbing.
108 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
110 * skip scrub rates which aren't recommended
111 * (see F10 BKDG, F3x58)
113 if (scrubrates[i].scrubval < min_scrubrate)
116 if (scrubrates[i].bandwidth <= new_bw)
120 * if no suitable bandwidth is found, turn off DRAM scrubbing
121 * entirely by falling back to the last element in the
126 scrubval = scrubrates[i].scrubval;
128 edac_printk(KERN_DEBUG, EDAC_MC,
129 "Setting scrub rate bandwidth: %u\n",
130 scrubrates[i].bandwidth);
132 edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n");
134 pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
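/*
 * Illustrative example: the scrubrates[] table above is ordered by
 * decreasing bandwidth, so a request of new_bw = 1600000000 stops at the
 * first entry and programs scrubval 0x01, while a request smaller than
 * every non-zero entry falls through to the terminating { 0x00, 0UL }
 * element and turns scrubbing off.
 */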
139 static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
141 struct amd64_pvt *pvt = mci->pvt_info;
142 u32 min_scrubrate = 0x0;
144 switch (boot_cpu_data.x86) {
146 min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
149 min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
152 min_scrubrate = F11_MIN_SCRUB_RATE_BITS;
156 amd64_printk(KERN_ERR, "Unsupported family!\n");
159 return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
163 static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
165 struct amd64_pvt *pvt = mci->pvt_info;
169 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
171 scrubval = scrubval & 0x001F;
173 edac_printk(KERN_DEBUG, EDAC_MC,
174 "pci-read, sdram scrub control value: %d \n", scrubval);
176 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
177 if (scrubrates[i].scrubval == scrubval) {
178 *bw = scrubrates[i].bandwidth;
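/*
 * Usage sketch (assumes an initialized mci; the rate comes back in
 * bytes/sec):
 *
 *	u32 bw = 0;
 *
 *	amd64_get_scrub_rate(mci, &bw);
 */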
187 /* Map from a CSROW entry to the mask entry that operates on it */
188 static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
190 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F)
196 /* return the 'base' address of the i'th CS entry of the 'dct' DRAM controller */
197 static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
200 return pvt->dcsb0[csrow];
202 return pvt->dcsb1[csrow];
206 * Return the 'mask' address of the i'th CS entry. This function is needed because
207 * the number of DCSM registers on Rev E and prior vs. Rev F and later is
210 static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
213 return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
215 return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
220 * In *base and *limit, pass back the full 40-bit base and limit physical
221 * addresses for the node given by node_id. This information is obtained from
222 * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
223 * base and limit addresses are of type SysAddr, as defined at the start of
224 * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
225 * in the address range they represent.
227 static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
228 u64 *base, u64 *limit)
230 *base = pvt->dram_base[node_id];
231 *limit = pvt->dram_limit[node_id];
235 * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
238 static int amd64_base_limit_match(struct amd64_pvt *pvt,
239 u64 sys_addr, int node_id)
241 u64 base, limit, addr;
243 amd64_get_base_and_limit(pvt, node_id, &base, &limit);
245 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
246 * all ones if the most significant implemented address bit is 1.
247 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
248 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
249 * Application Programming.
251 addr = sys_addr & 0x000000ffffffffffull;
253 return (addr >= base) && (addr <= limit);
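/*
 * Example: a canonical SysAddr of 0xffffff8012345678 is truncated to
 * 0x8012345678 before the base/limit comparison.
 */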
257 * Attempt to map a SysAddr to a node. On success, return a pointer to the
258 * mem_ctl_info structure for the node that the SysAddr maps to.
260 * On failure, return NULL.
262 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
265 struct amd64_pvt *pvt;
270 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
271 * 3.4.4.2) registers to map the SysAddr to a node ID.
276 * The value of this field should be the same for all DRAM Base
277 * registers. Therefore we arbitrarily choose to read it from the
278 * register for node 0.
280 intlv_en = pvt->dram_IntlvEn[0];
283 for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
284 if (amd64_base_limit_match(pvt, sys_addr, node_id))
290 if (unlikely((intlv_en != 0x01) &&
291 (intlv_en != 0x03) &&
292 (intlv_en != 0x07))) {
293 amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
294 "IntlvEn field of DRAM Base Register for node 0: "
295 "this probably indicates a BIOS bug.\n", intlv_en);
299 bits = (((u32) sys_addr) >> 12) & intlv_en;
301 for (node_id = 0; ; ) {
302 if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
303 break; /* intlv_sel field matches */
305 if (++node_id >= DRAM_REG_COUNT)
309 /* sanity test for sys_addr */
310 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
311 amd64_printk(KERN_WARNING,
312 "%s(): sys_addr 0x%llx falls outside base/limit "
313 "address range for node %d with node interleaving "
315 __func__, sys_addr, node_id);
320 return edac_mc_find(node_id);
323 debugf2("sys_addr 0x%lx doesn't match any node\n",
324 (unsigned long)sys_addr);
330 * Extract the DRAM CS base address from the selected csrow register.
332 static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
334 return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
339 * Extract the mask from the dcsm0[csrow] entry in a CPU revision-specific way.
341 static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
343 u64 dcsm_bits, other_bits;
346 /* Extract bits from DRAM CS Mask. */
347 dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
349 other_bits = pvt->dcsm_mask;
350 other_bits = ~(other_bits << pvt->dcs_shift);
353 * The extracted bits from DCSM belong in the spaces represented by
354 * the cleared bits in other_bits.
356 mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
362 * @input_addr is an InputAddr associated with the node given by mci. Return the
363 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
365 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
367 struct amd64_pvt *pvt;
374 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
375 * base/mask register pair, test the condition shown near the start of
376 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
378 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
380 /* This DRAM chip select is disabled on this node */
381 if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
384 base = base_from_dct_base(pvt, csrow);
385 mask = ~mask_from_dct_mask(pvt, csrow);
387 if ((input_addr & mask) == (base & mask)) {
388 debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
389 (unsigned long)input_addr, csrow,
396 debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
397 (unsigned long)input_addr, pvt->mc_node_id);
403 * Return the base value defined by the DRAM Base register for the node
404 * represented by mci. This function returns the full 40-bit value despite the
405 * fact that the register only stores bits 39-24 of the value. See section
406 * 3.4.4.1 (BKDG #26094, K8, revA-E)
408 static inline u64 get_dram_base(struct mem_ctl_info *mci)
410 struct amd64_pvt *pvt = mci->pvt_info;
412 return pvt->dram_base[pvt->mc_node_id];
416 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
417 * for the node represented by mci. Info is passed back in *hole_base,
418 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
419 * info is invalid. Info may be invalid for either of the following reasons:
421 * - The revision of the node is not E or greater. In this case, the DRAM Hole
422 * Address Register does not exist.
424 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
425 * indicating that its contents are not valid.
427 * The values passed back in *hole_base, *hole_offset, and *hole_size are
428 * complete 32-bit values despite the fact that the bitfields in the DHAR
429 * only represent bits 31-24 of the base and offset values.
431 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
432 u64 *hole_offset, u64 *hole_size)
434 struct amd64_pvt *pvt = mci->pvt_info;
437 /* only revE and later have the DRAM Hole Address Register */
438 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_E) {
439 debugf1(" revision %d for node %d does not support DHAR\n",
440 pvt->ext_model, pvt->mc_node_id);
444 /* only valid for Fam10h */
445 if (boot_cpu_data.x86 == 0x10 &&
446 (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
447 debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
451 if ((pvt->dhar & DHAR_VALID) == 0) {
452 debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
457 /* This node has Memory Hoisting */
459 /* +------------------+--------------------+--------------------+-----
460 * | memory | DRAM hole | relocated |
461 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
463 * | | | [0x100000000, |
464 * | | | (0x100000000+ |
465 * | | | (0xffffffff-x))] |
466 * +------------------+--------------------+--------------------+-----
468 * Above is a diagram of physical memory showing the DRAM hole and the
469 * relocated addresses from the DRAM hole. As shown, the DRAM hole
470 * starts at address x (the base address) and extends through address
471 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
472 * addresses in the hole so that they start at 0x100000000.
475 base = dhar_base(pvt->dhar);
478 *hole_size = (0x1ull << 32) - base;
480 if (boot_cpu_data.x86 > 0xf)
481 *hole_offset = f10_dhar_offset(pvt->dhar);
483 *hole_offset = k8_dhar_offset(pvt->dhar);
485 debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
486 pvt->mc_node_id, (unsigned long)*hole_base,
487 (unsigned long)*hole_offset, (unsigned long)*hole_size);
491 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
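/*
 * Usage sketch (assumes an initialized mci): a zero return means the
 * outputs are valid.
 *
 *	u64 hole_base, hole_offset, hole_size;
 *
 *	if (!amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
 *				      &hole_size))
 *		debugf1("hole spans [0x%lx, 4GB), size 0x%lx\n",
 *			(unsigned long)hole_base, (unsigned long)hole_size);
 */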
494 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
495 * assumed that sys_addr maps to the node given by mci.
497 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
498 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
499 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
500 * then it is also involved in translating a SysAddr to a DramAddr. Sections
501 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
502 * These parts of the documentation are unclear. I interpret them as follows:
504 * When node n receives a SysAddr, it processes the SysAddr as follows:
506 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
507 * Limit registers for node n. If the SysAddr is not within the range
508 * specified by the base and limit values, then node n ignores the SysAddr
509 * (since it does not map to node n). Otherwise continue to step 2 below.
511 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
512 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
513 * the range of relocated addresses (starting at 0x100000000) from the DRAM
514 * hole. If not, skip to step 3 below. Else get the value of the
515 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
516 * offset defined by this value from the SysAddr.
518 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
519 * Base register for node n. To obtain the DramAddr, subtract the base
520 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
522 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
524 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
527 dram_base = get_dram_base(mci);
529 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
532 if ((sys_addr >= (1ull << 32)) &&
533 (sys_addr < ((1ull << 32) + hole_size))) {
534 /* use DHAR to translate SysAddr to DramAddr */
535 dram_addr = sys_addr - hole_offset;
537 debugf2("using DHAR to translate SysAddr 0x%lx to "
539 (unsigned long)sys_addr,
540 (unsigned long)dram_addr);
547 * Translate the SysAddr to a DramAddr as shown near the start of
548 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
549 * only deals with 40-bit values. Therefore we discard bits 63-40 of
550 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
551 * discard are all 1s. Otherwise the bits we discard are all 0s. See
552 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
553 * Programmer's Manual Volume 1 Application Programming.
555 dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
557 debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
558 "DramAddr 0x%lx\n", (unsigned long)sys_addr,
559 (unsigned long)dram_addr);
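/*
 * Worked example (hypothetical values): with a hole base of 0xc0000000
 * (3GB), hole_size = 0x40000000 and, on K8, hole_offset = 0x40000000 as
 * well, so SysAddr 0x100000000 translates to DramAddr 0xc0000000.
 */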
564 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
565 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
566 * for node interleaving.
568 static int num_node_interleave_bits(unsigned intlv_en)
570 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
573 BUG_ON(intlv_en > 7);
574 n = intlv_shift_table[intlv_en];
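/*
 * Example: intlv_en = 001b (2-node interleave) uses 1 SysAddr bit,
 * 011b (4 nodes) uses 2 bits, and 111b (8 nodes) uses 3 bits.
 */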
578 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
579 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
581 struct amd64_pvt *pvt;
588 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
589 * concerning translating a DramAddr to an InputAddr.
591 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
592 input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
595 debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
596 intlv_shift, (unsigned long)dram_addr,
597 (unsigned long)input_addr);
603 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
604 * assumed that @sys_addr maps to the node given by mci.
606 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
611 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
613 debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
614 (unsigned long)sys_addr, (unsigned long)input_addr);
621 * @input_addr is an InputAddr associated with the node represented by mci.
622 * Translate @input_addr to a DramAddr and return the result.
624 static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
626 struct amd64_pvt *pvt;
627 int node_id, intlv_shift;
632 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
633 * shows how to translate a DramAddr to an InputAddr. Here we reverse
634 * this procedure. When translating from a DramAddr to an InputAddr, the
635 * bits used for node interleaving are discarded. Here we recover these
636 * bits from the IntlvSel field of the DRAM Limit register (section
637 * 3.4.4.2) for the node that input_addr is associated with.
640 node_id = pvt->mc_node_id;
641 BUG_ON((node_id < 0) || (node_id > 7));
643 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
645 if (intlv_shift == 0) {
646 debugf1(" InputAddr 0x%lx translates to DramAddr of "
647 "same value\n", (unsigned long)input_addr);
652 bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
653 (input_addr & 0xfff);
655 intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
656 dram_addr = bits + (intlv_sel << 12);
658 debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
659 "(%d node interleave bits)\n", (unsigned long)input_addr,
660 (unsigned long)dram_addr, intlv_shift);
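/*
 * Worked example (hypothetical values): with intlv_shift = 1 and
 * intlv_sel = 1, InputAddr 0x1000 gives bits = 0x2000 and
 * DramAddr = 0x2000 + (1 << 12) = 0x3000.
 */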
666 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
667 * @dram_addr to a SysAddr.
669 static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
671 struct amd64_pvt *pvt = mci->pvt_info;
672 u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
675 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
678 if ((dram_addr >= hole_base) &&
679 (dram_addr < (hole_base + hole_size))) {
680 sys_addr = dram_addr + hole_offset;
682 debugf1("using DHAR to translate DramAddr 0x%lx to "
683 "SysAddr 0x%lx\n", (unsigned long)dram_addr,
684 (unsigned long)sys_addr);
690 amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
691 sys_addr = dram_addr + base;
694 * The sys_addr we have computed up to this point is a 40-bit value
695 * because the k8 deals with 40-bit values. However, the value we are
696 * supposed to return is a full 64-bit physical address. The AMD
697 * x86-64 architecture specifies that the most significant implemented
698 * address bit through bit 63 of a physical address must be either all
699 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
700 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
701 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
704 sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
706 debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
707 pvt->mc_node_id, (unsigned long)dram_addr,
708 (unsigned long)sys_addr);
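/*
 * Example: a 40-bit sys_addr of 0x8000000000 (bit 39 set) sign-extends
 * to 0xffffff8000000000, while 0x0012345678 is returned unchanged.
 */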
714 * @input_addr is an InputAddr associated with the node given by mci. Translate
715 * @input_addr to a SysAddr.
717 static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
720 return dram_addr_to_sys_addr(mci,
721 input_addr_to_dram_addr(mci, input_addr));
725 * Find the minimum and maximum InputAddr values that map to the given @csrow.
726 * Pass back these values in *input_addr_min and *input_addr_max.
728 static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
729 u64 *input_addr_min, u64 *input_addr_max)
731 struct amd64_pvt *pvt;
735 BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
737 base = base_from_dct_base(pvt, csrow);
738 mask = mask_from_dct_mask(pvt, csrow);
740 *input_addr_min = base & ~mask;
741 *input_addr_max = base | mask | pvt->dcs_mask_notused;
745 * Extract the error address from the MCA NB Address Low (section 3.6.4.5) and
746 * MCA NB Address High (section 3.6.4.6) register values and return the result.
747 * The address is located in the info structure (nbeah and nbeal); the encoding is device specific.
750 static u64 extract_error_address(struct mem_ctl_info *mci,
751 struct err_regs *info)
753 struct amd64_pvt *pvt = mci->pvt_info;
755 return pvt->ops->get_error_address(mci, info);
759 /* Map the Error address to a PAGE and PAGE OFFSET. */
760 static inline void error_address_to_page_and_offset(u64 error_address,
761 u32 *page, u32 *offset)
763 *page = (u32) (error_address >> PAGE_SHIFT);
764 *offset = ((u32) error_address) & ~PAGE_MASK;
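/*
 * Example (4K pages): error_address 0x12345678 yields *page = 0x12345
 * and *offset = 0x678.
 */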
768 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
769 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
770 * of a node that detected an ECC memory error. mci represents the node that
771 * the error address maps to (possibly different from the node that detected
772 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
775 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
779 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
782 amd64_mc_printk(mci, KERN_ERR,
783 "Failed to translate InputAddr to csrow for "
784 "address 0x%lx\n", (unsigned long)sys_addr);
788 static int get_channel_from_ecc_syndrome(unsigned short syndrome);
790 static void amd64_cpu_display_info(struct amd64_pvt *pvt)
792 if (boot_cpu_data.x86 == 0x11)
793 edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n");
794 else if (boot_cpu_data.x86 == 0x10)
795 edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
796 else if (boot_cpu_data.x86 == 0xf)
797 edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
798 (pvt->ext_model >= OPTERON_CPU_REV_F) ?
799 "Rev F or later" : "Rev E or earlier");
801 /* we'll hardly ever get here */
802 edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n");
806 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
809 static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
812 enum edac_type edac_cap = EDAC_FLAG_NONE;
814 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
818 if (pvt->dclr0 & BIT(bit))
819 edac_cap = EDAC_FLAG_SECDED;
825 static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
828 static void amd64_dump_dramcfg_low(u32 dclr, int chan)
830 debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
832 debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
833 (dclr & BIT(16)) ? "un" : "",
834 (dclr & BIT(19)) ? "yes" : "no");
836 debugf1(" PAR/ERR parity: %s\n",
837 (dclr & BIT(8)) ? "enabled" : "disabled");
839 debugf1(" DCT 128bit mode width: %s\n",
840 (dclr & BIT(11)) ? "128b" : "64b");
842 debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
843 (dclr & BIT(12)) ? "yes" : "no",
844 (dclr & BIT(13)) ? "yes" : "no",
845 (dclr & BIT(14)) ? "yes" : "no",
846 (dclr & BIT(15)) ? "yes" : "no");
849 /* Display and decode various NB registers for debug purposes. */
850 static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
854 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
856 debugf1(" NB two channel DRAM capable: %s\n",
857 (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
859 debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
860 (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
861 (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
863 amd64_dump_dramcfg_low(pvt->dclr0, 0);
865 debugf1(" online-spare: 0x%8.08x\n", pvt->online_spare);
867 if (boot_cpu_data.x86 == 0xf) {
868 debugf1(" dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
869 pvt->dhar, dhar_base(pvt->dhar),
870 k8_dhar_offset(pvt->dhar));
871 debugf1(" DramHoleValid=%s\n",
872 (pvt->dhar & DHAR_VALID) ? "True" : "False");
874 debugf1(" dbam-dkt: 0x%8.08x\n", pvt->dbam0);
876 /* everything below this point is Fam10h and above */
880 debugf1(" dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
881 pvt->dhar, dhar_base(pvt->dhar),
882 f10_dhar_offset(pvt->dhar));
883 debugf1(" DramMemHoistValid=%s DramHoleValid=%s\n",
884 (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) ?
886 (pvt->dhar & DHAR_VALID) ?
890 /* Only if NOT ganged does dclr1 have valid info */
891 if (!dct_ganging_enabled(pvt))
892 amd64_dump_dramcfg_low(pvt->dclr1, 1);
895 * Determine if ganged and then dump memory sizes for first controller,
896 * and if NOT ganged dump info for 2nd controller.
898 ganged = dct_ganging_enabled(pvt);
900 f10_debug_display_dimm_sizes(0, pvt, ganged);
903 f10_debug_display_dimm_sizes(1, pvt, ganged);
906 /* Read in both of the DBAM registers */
907 static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
909 amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0);
911 if (boot_cpu_data.x86 >= 0x10)
912 amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1);
916 * NOTE: CPU Revision Dependent code: Rev E and Rev F
918 * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
919 * set the shift factor for the DCSB and DCSM values.
921 * ->dcs_mask_notused, RevE:
923 * To find the max InputAddr for the csrow, start with the base address and set
924 * all bits that are "don't care" bits in the test at the start of section
927 * The "don't care" bits are all set bits in the mask and all bits in the gaps
928 * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
929 * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
932 * ->dcs_mask_notused, RevF and later:
934 * To find the max InputAddr for the csrow, start with the base address and set
935 * all bits that are "don't care" bits in the test at the start of NPT section
938 * The "don't care" bits are all set bits in the mask and all bits in the gaps
939 * between bit ranges [36:27] and [21:13].
941 * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
942 * which are all bits in the above-mentioned gaps.
944 static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
947 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) {
948 pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
949 pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
950 pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
951 pvt->dcs_shift = REV_E_DCS_SHIFT;
955 pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
956 pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
957 pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
958 pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
960 if (boot_cpu_data.x86 == 0x11) {
971 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
973 static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
977 amd64_set_dct_base_and_mask(pvt);
979 for (cs = 0; cs < pvt->cs_count; cs++) {
980 reg = K8_DCSB0 + (cs * 4);
981 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs]))
982 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
983 cs, pvt->dcsb0[cs], reg);
985 /* If DCTs are NOT ganged, then read in DCT1's base */
986 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
987 reg = F10_DCSB1 + (cs * 4);
988 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
990 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
991 cs, pvt->dcsb1[cs], reg);
997 for (cs = 0; cs < pvt->num_dcsm; cs++) {
998 reg = K8_DCSM0 + (cs * 4);
999 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs]))
1000 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
1001 cs, pvt->dcsm0[cs], reg);
1003 /* If DCTs are NOT ganged, then read in DCT1's mask */
1004 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
1005 reg = F10_DCSM1 + (cs * 4);
1006 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
1008 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
1009 cs, pvt->dcsm1[cs], reg);
1016 static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
1020 if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= OPTERON_CPU_REV_F) {
1021 /* Rev F and later */
1022 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1024 /* Rev E and earlier */
1025 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1028 debugf1(" Memory type is: %s\n",
1029 (type == MEM_DDR2) ? "MEM_DDR2" :
1030 (type == MEM_RDDR2) ? "MEM_RDDR2" :
1031 (type == MEM_DDR) ? "MEM_DDR" : "MEM_RDDR");
1037 * Read the DRAM Configuration Low register. It differs between CG, D & E revs
1038 * and the later RevF memory controllers (DDR vs DDR2)
1041 * number of memory channels in operation
1043 * contents of the DCL0_LOW register
1045 static int k8_early_channel_count(struct amd64_pvt *pvt)
1049 err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
1053 if ((boot_cpu_data.x86_model >> 4) >= OPTERON_CPU_REV_F) {
1054 /* RevF (NPT) and later */
1055 flag = pvt->dclr0 & F10_WIDTH_128;
1057 /* RevE and earlier */
1058 flag = pvt->dclr0 & REVE_WIDTH_128;
1064 return (flag) ? 2 : 1;
1067 /* extract the ERROR ADDRESS for the K8 CPUs */
1068 static u64 k8_get_error_address(struct mem_ctl_info *mci,
1069 struct err_regs *info)
1071 return (((u64) (info->nbeah & 0xff)) << 32) +
1072 (info->nbeal & ~0x03);
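/*
 * Example: nbeah = 0x12 and nbeal = 0x3456789b yield
 * (0x12ULL << 32) + (0x3456789b & ~0x03) = 0x1234567898.
 */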
1076 * Read the Base and Limit registers for K8 based Memory controllers; extract
1077 * fields from the 'raw' reg into separate data fields
1079 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
1081 static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1084 u32 off = dram << 3; /* 8 bytes between DRAM entries */
1086 amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low);
1088 /* Extract parts into separate data entries */
1089 pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
1090 pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
1091 pvt->dram_rw_en[dram] = (low & 0x3);
1093 amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low);
1096 * Extract parts into separate data entries. Limit is the HIGHEST memory
1097 * location of the region, so lower 24 bits need to be all ones
1099 pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
1100 pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
1101 pvt->dram_DstNode[dram] = (low & 0x7);
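/*
 * Worked example (hypothetical register value): low = 0xabcd0703 in the
 * limit register gives dram_limit = 0xabcd00ffffff, IntlvSel = 0x7 and
 * DstNode = 0x3.
 */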
1104 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1105 struct err_regs *info,
1108 struct mem_ctl_info *src_mci;
1109 unsigned short syndrome;
1113 /* Extract the syndrome parts and form a 16-bit syndrome */
1114 syndrome = HIGH_SYNDROME(info->nbsl) << 8;
1115 syndrome |= LOW_SYNDROME(info->nbsh);
1117 /* CHIPKILL enabled */
1118 if (info->nbcfg & K8_NBCFG_CHIPKILL) {
1119 channel = get_channel_from_ecc_syndrome(syndrome);
1122 * Syndrome didn't map, so we don't know which of the
1123 * 2 DIMMs is in error. So we need to ID 'both' of them
1126 amd64_mc_printk(mci, KERN_WARNING,
1127 "unknown syndrome 0x%x - possible error "
1128 "reporting race\n", syndrome);
1129 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1134 * non-chipkill ecc mode
1136 * The k8 documentation is unclear about how to determine the
1137 * channel number when using non-chipkill memory. This method
1138 * was obtained from email communication with someone at AMD.
1139 * (Wish the email was placed in this comment - norsk)
1141 channel = ((SystemAddress & BIT(3)) != 0);
1145 * Find out which node the error address belongs to. This may be
1146 * different from the node that detected the error.
1148 src_mci = find_mc_by_sys_addr(mci, SystemAddress);
1150 amd64_mc_printk(mci, KERN_ERR,
1151 "failed to map error address 0x%lx to a node\n",
1152 (unsigned long)SystemAddress);
1153 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1157 /* Now map the SystemAddress to a CSROW */
1158 csrow = sys_addr_to_csrow(src_mci, SystemAddress);
1160 edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
1162 error_address_to_page_and_offset(SystemAddress, &page, &offset);
1164 edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
1165 channel, EDAC_MOD_STR);
1170 * determine the number of PAGES for this DIMM's size based on its DRAM
1173 * First step is to calc the number of bits to shift a value of 1 left to
1174 * indicate how many pages. Start with the DBAM value as the starting bits,
1175 * then proceed to adjust those shift bits, based on CPU rev and the table.
1176 * See BKDG on the DBAM
1178 static int k8_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
1182 if (pvt->ext_model >= OPTERON_CPU_REV_F) {
1183 nr_pages = 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
1186 * RevE and earlier section; this line is tricky. It collapses the
1187 * table used by RevD and later to one that matches revisions CG
1190 dram_map -= (pvt->ext_model >= OPTERON_CPU_REV_D) ?
1191 (dram_map > 8 ? 4 : (dram_map > 5 ?
1192 3 : (dram_map > 2 ? 1 : 0))) : 0;
1194 /* 25 shift is 32MiB minimum DIMM size in RevE and prior */
1195 nr_pages = 1 << (dram_map + 25 - PAGE_SHIFT);
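/*
 * Example (PAGE_SHIFT == 12): on RevE, dram_map = 0 yields
 * nr_pages = 1 << (0 + 25 - 12) = 8192 pages, i.e. the 32MiB minimum.
 */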
1202 * Get the number of DCT channels in use.
1205 * number of Memory Channels in operation
1207 * contents of the DCL0_LOW register
1209 static int f10_early_channel_count(struct amd64_pvt *pvt)
1211 int dbams[] = { DBAM0, DBAM1 };
1212 int i, j, channels = 0;
1215 if (amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0))
1218 if (amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1))
1221 /* If we are in 128 bit mode, then we are using 2 channels */
1222 if (pvt->dclr0 & F10_WIDTH_128) {
1223 debugf0("Data WIDTH is 128 bits - 2 channels\n");
1229 * Need to check if in UN-ganged mode: in that case, there are 2 channels,
1230 * but they are NOT in 128 bit mode and thus the above 'dclr0' status bit
1233 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1234 * its CSEnable bit on. If so, we have the SINGLE DIMM case.
1236 debugf0("Data WIDTH is NOT 128 bits - need more decoding\n");
1239 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1240 * is more than just one DIMM present in unganged mode. Need to check
1241 * both controllers since DIMMs can be placed in either one.
1243 for (i = 0; i < ARRAY_SIZE(dbams); i++) {
1244 if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam))
1247 for (j = 0; j < 4; j++) {
1248 if (DBAM_DIMM(j, dbam) > 0) {
1255 debugf0("MCT channel count: %d\n", channels);
1264 static int f10_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
1266 return 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT);
1269 /* Enable extended configuration access via 0xCF8 feature */
1270 static void amd64_setup(struct amd64_pvt *pvt)
1274 amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1276 pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
1277 reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1278 pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
1281 /* Restore the extended configuration access via 0xCF8 feature */
1282 static void amd64_teardown(struct amd64_pvt *pvt)
1286 amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1288 reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1289 if (pvt->flags.cf8_extcfg)
1290 reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1291 pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
1294 static u64 f10_get_error_address(struct mem_ctl_info *mci,
1295 struct err_regs *info)
1297 return (((u64) (info->nbeah & 0xffff)) << 32) +
1298 (info->nbeal & ~0x01);
1302 * Read the Base and Limit registers for F10 based Memory controllers. Extract
1303 * fields from the 'raw' reg into separate data fields.
1305 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
1307 static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1309 u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
1311 low_offset = K8_DRAM_BASE_LOW + (dram << 3);
1312 high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
1314 /* read the 'raw' DRAM BASE Address register */
1315 amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base);
1317 /* Read from the ECS data register */
1318 amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base);
1320 /* Extract parts into separate data entries */
1321 pvt->dram_rw_en[dram] = (low_base & 0x3);
1323 if (pvt->dram_rw_en[dram] == 0)
1326 pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
1328 pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
1329 (((u64)low_base & 0xFFFF0000) << 8);
1331 low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
1332 high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
1334 /* read the 'raw' LIMIT registers */
1335 amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit);
1337 /* Read from the ECS data register for the HIGH portion */
1338 amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit);
1340 debugf0(" HW Regs: BASE=0x%08x-%08x LIMIT= 0x%08x-%08x\n",
1341 high_base, low_base, high_limit, low_limit);
1343 pvt->dram_DstNode[dram] = (low_limit & 0x7);
1344 pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
1347 * Extract address values and form a LIMIT address. Limit is the HIGHEST
1348 * memory location of the region, so low 24 bits need to be all ones.
1350 pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
1351 (((u64) low_limit & 0xFFFF0000) << 8) |
1355 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
1358 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
1359 &pvt->dram_ctl_select_low)) {
1360 debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
1361 "High range addresses at: 0x%x\n",
1362 pvt->dram_ctl_select_low,
1363 dct_sel_baseaddr(pvt));
1365 debugf0(" DCT mode: %s, All DCTs on: %s\n",
1366 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
1367 (dct_dram_enabled(pvt) ? "yes" : "no"));
1369 if (!dct_ganging_enabled(pvt))
1370 debugf0(" Address range split per DCT: %s\n",
1371 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1373 debugf0(" DCT data interleave for ECC: %s, "
1374 "DRAM cleared since last warm reset: %s\n",
1375 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1376 (dct_memory_cleared(pvt) ? "yes" : "no"));
1378 debugf0(" DCT channel interleave: %s, "
1379 "DCT interleave bits selector: 0x%x\n",
1380 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1381 dct_sel_interleave_addr(pvt));
1384 amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
1385 &pvt->dram_ctl_select_high);
1389 * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1390 * Interleaving Modes.
1392 static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1393 int hi_range_sel, u32 intlv_en)
1395 u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
1397 if (dct_ganging_enabled(pvt))
1399 else if (hi_range_sel)
1401 else if (dct_interleave_enabled(pvt)) {
1403 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1405 if (dct_sel_interleave_addr(pvt) == 0)
1406 cs = sys_addr >> 6 & 1;
1407 else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
1408 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1410 if (dct_sel_interleave_addr(pvt) & 1)
1411 cs = (sys_addr >> 9 & 1) ^ temp;
1413 cs = (sys_addr >> 6 & 1) ^ temp;
1414 } else if (intlv_en & 4)
1415 cs = sys_addr >> 15 & 1;
1416 else if (intlv_en & 2)
1417 cs = sys_addr >> 14 & 1;
1418 else if (intlv_en & 1)
1419 cs = sys_addr >> 13 & 1;
1421 cs = sys_addr >> 12 & 1;
1422 } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
1423 cs = ~dct_sel_high & 1;
1430 static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
1434 else if (intlv_en == 3)
1436 else if (intlv_en == 7)
1442 /* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
1443 static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
1444 u32 dct_sel_base_addr,
1445 u64 dct_sel_base_off,
1446 u32 hole_valid, u32 hole_off,
1452 if (!(dct_sel_base_addr & 0xFFFFF800) &&
1453 hole_valid && (sys_addr >= 0x100000000ULL))
1454 chan_off = hole_off << 16;
1456 chan_off = dct_sel_base_off;
1458 if (hole_valid && (sys_addr >= 0x100000000ULL))
1459 chan_off = hole_off << 16;
1461 chan_off = dram_base & 0xFFFFF8000000ULL;
1464 return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
1465 (chan_off & 0x0000FFFFFF800000ULL);
1468 /* Hack for the time being - Can we get this from BIOS?? */
1469 #define CH0SPARE_RANK 0
1470 #define CH1SPARE_RANK 1
1473 * checks if the csrow passed in is marked as SPARED; if so, returns the new
1476 static inline int f10_process_possible_spare(int csrow,
1477 u32 cs, struct amd64_pvt *pvt)
1482 /* Depending on channel, isolate respective SPARING info */
1484 swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
1485 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
1486 if (swap_done && (csrow == bad_dram_cs))
1487 csrow = CH1SPARE_RANK;
1489 swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
1490 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
1491 if (swap_done && (csrow == bad_dram_cs))
1492 csrow = CH0SPARE_RANK;
1498 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1499 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1502 * -EINVAL: NOT FOUND
1503 * 0..csrow = Chip-Select Row
1505 static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
1507 struct mem_ctl_info *mci;
1508 struct amd64_pvt *pvt;
1509 u32 cs_base, cs_mask;
1510 int cs_found = -EINVAL;
1513 mci = mci_lookup[nid];
1517 pvt = mci->pvt_info;
1519 debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
1521 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
1523 cs_base = amd64_get_dct_base(pvt, cs, csrow);
1524 if (!(cs_base & K8_DCSB_CS_ENABLE))
1528 * We have an ENABLED CSROW; isolate just the MASK bits of the
1529 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
1530 * of the actual address.
1532 cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
1535 * Get the DCT Mask, and turn ON the reserved bits: [18:16] and
1536 * [4:0]. Then mask off bits [28:0] ([36:8])
1538 cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
1540 debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
1541 csrow, cs_base, cs_mask);
1543 cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
1545 debugf1(" Final CSMask=0x%x\n", cs_mask);
1546 debugf1(" (InputAddr & ~CSMask)=0x%x "
1547 "(CSBase & ~CSMask)=0x%x\n",
1548 (in_addr & ~cs_mask), (cs_base & ~cs_mask));
1550 if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
1551 cs_found = f10_process_possible_spare(csrow, cs, pvt);
1553 debugf1(" MATCH csrow=%d\n", cs_found);
1560 /* For a given @dram_range, check if @sys_addr falls within it. */
1561 static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
1562 u64 sys_addr, int *nid, int *chan_sel)
1564 int node_id, cs_found = -EINVAL, high_range = 0;
1565 u32 intlv_en, intlv_sel, intlv_shift, hole_off;
1566 u32 hole_valid, tmp, dct_sel_base, channel;
1567 u64 dram_base, chan_addr, dct_sel_base_off;
1569 dram_base = pvt->dram_base[dram_range];
1570 intlv_en = pvt->dram_IntlvEn[dram_range];
1572 node_id = pvt->dram_DstNode[dram_range];
1573 intlv_sel = pvt->dram_IntlvSel[dram_range];
1575 debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
1576 dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
1579 * This assumes that one node's DHAR is the same as all the other
1582 hole_off = (pvt->dhar & 0x0000FF80);
1583 hole_valid = (pvt->dhar & 0x1);
1584 dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
1586 debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
1587 hole_off, hole_valid, intlv_sel);
1590 (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1593 dct_sel_base = dct_sel_baseaddr(pvt);
1596 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1597 * select between DCT0 and DCT1.
1599 if (dct_high_range_enabled(pvt) &&
1600 !dct_ganging_enabled(pvt) &&
1601 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1604 channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
1606 chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
1607 dct_sel_base_off, hole_valid,
1608 hole_off, dram_base);
1610 intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
1612 /* remove Node ID (in case of memory interleaving) */
1613 tmp = chan_addr & 0xFC0;
1615 chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
1617 /* remove channel interleave and hash */
1618 if (dct_interleave_enabled(pvt) &&
1619 !dct_high_range_enabled(pvt) &&
1620 !dct_ganging_enabled(pvt)) {
1621 if (dct_sel_interleave_addr(pvt) != 1)
1622 chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
1624 tmp = chan_addr & 0xFC0;
1625 chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
1630 debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
1631 chan_addr, (u32)(chan_addr >> 8));
1633 cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
1635 if (cs_found >= 0) {
1637 *chan_sel = channel;
1642 static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1643 int *node, int *chan_sel)
1645 int dram_range, cs_found = -EINVAL;
1646 u64 dram_base, dram_limit;
1648 for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
1650 if (!pvt->dram_rw_en[dram_range])
1653 dram_base = pvt->dram_base[dram_range];
1654 dram_limit = pvt->dram_limit[dram_range];
1656 if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
1658 cs_found = f10_match_to_this_node(pvt, dram_range,
1669 * This is the F10h reference code from AMD to map a @sys_addr to NodeID,
1672 * The @sys_addr is usually an error address received from the hardware.
1674 static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1675 struct err_regs *info,
1678 struct amd64_pvt *pvt = mci->pvt_info;
1680 unsigned short syndrome;
1681 int nid, csrow, chan = 0;
1683 csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1686 error_address_to_page_and_offset(sys_addr, &page, &offset);
1688 syndrome = HIGH_SYNDROME(info->nbsl) << 8;
1689 syndrome |= LOW_SYNDROME(info->nbsh);
1692 * Is CHIPKILL on? If so, then we can attempt to use the
1693 * syndrome to isolate which channel the error was on.
1695 if (pvt->nbcfg & K8_NBCFG_CHIPKILL)
1696 chan = get_channel_from_ecc_syndrome(syndrome);
1699 edac_mc_handle_ce(mci, page, offset, syndrome,
1700 csrow, chan, EDAC_MOD_STR);
1703 * Channel unknown, report all channels on this
1706 for (chan = 0; chan < mci->csrows[csrow].nr_channels;
1708 edac_mc_handle_ce(mci, page, offset,
1716 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1721 * Input (@index) is the DBAM DIMM value (1 of 4) used as an index into a shift
1722 * table (revf_quad_ddr2_shift) which starts at 128MB DIMM size. Index of 0
1723 * indicates an empty DIMM slot, as reported by the hardware.
1725 * Normalize to 128MB by subtracting a 27-bit shift.
1727 static int map_dbam_to_csrow_size(int index)
1731 if (index > 0 && index <= DBAM_MAX_VALUE)
1732 mega_bytes = ((128 << (revf_quad_ddr2_shift[index]-27)));
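/*
 * Example: a shift-table entry of 27 maps to 128 << 0 = 128MB, an entry
 * of 28 to 128 << 1 = 256MB.
 */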
1738 * debug routine to display the memory sizes of a DIMM (ganged or not) and its
1741 static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
1744 int dimm, size0, size1;
1748 debugf1(" dbam%d: 0x%8.08x CSROW is %s\n", ctrl,
1749 ctrl ? pvt->dbam1 : pvt->dbam0,
1750 ganged ? "GANGED - dbam1 not used" : "NON-GANGED");
1752 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1753 dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
1755 /* Dump memory sizes for DIMM and its CSROWs */
1756 for (dimm = 0; dimm < 4; dimm++) {
1759 if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
1760 size0 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
1763 if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
1764 size1 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam));
1766 debugf1(" CTRL-%d DIMM-%d=%5dMB CSROW-%d=%5dMB "
1779 * Very early hardware probe on pci_probe thread to determine if this module
1780 * supports the hardware.
1786 static int f10_probe_valid_hardware(struct amd64_pvt *pvt)
1791 * If we are on a DDR3 machine, we don't know yet whether
1792 * we support it properly at this time
1794 if ((pvt->dchr0 & F10_DCHR_Ddr3Mode) ||
1795 (pvt->dchr1 & F10_DCHR_Ddr3Mode)) {
1797 amd64_printk(KERN_WARNING,
1798 "%s() This machine is running with DDR3 memory. "
1799 "This is not currently supported. "
1800 "DCHR0=0x%x DCHR1=0x%x\n",
1801 __func__, pvt->dchr0, pvt->dchr1);
1803 amd64_printk(KERN_WARNING,
1804 " Contact '%s' module MAINTAINER to help add"
1815 * There are currently 3 types of MC devices for AMD Athlon/Opterons
1816 * (as per PCI DEVICE_IDs):
1818 * Family K8: That is the Athlon64 and Opteron CPUs. They all have the same PCI
1819 * DEVICE ID, even though there are differences between the different Revisions
1822 * Family F10h and F11h.
1825 static struct amd64_family_type amd64_family_types[] = {
1828 .addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1829 .misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1831 .early_channel_count = k8_early_channel_count,
1832 .get_error_address = k8_get_error_address,
1833 .read_dram_base_limit = k8_read_dram_base_limit,
1834 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1835 .dbam_map_to_pages = k8_dbam_map_to_pages,
1839 .ctl_name = "Family 10h",
1840 .addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1841 .misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1843 .probe_valid_hardware = f10_probe_valid_hardware,
1844 .early_channel_count = f10_early_channel_count,
1845 .get_error_address = f10_get_error_address,
1846 .read_dram_base_limit = f10_read_dram_base_limit,
1847 .read_dram_ctl_register = f10_read_dram_ctl_register,
1848 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1849 .dbam_map_to_pages = f10_dbam_map_to_pages,
1853 .ctl_name = "Family 11h",
1854 .addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
1855 .misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
1857 .probe_valid_hardware = f10_probe_valid_hardware,
1858 .early_channel_count = f10_early_channel_count,
1859 .get_error_address = f10_get_error_address,
1860 .read_dram_base_limit = f10_read_dram_base_limit,
1861 .read_dram_ctl_register = f10_read_dram_ctl_register,
1862 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1863 .dbam_map_to_pages = f10_dbam_map_to_pages,
1868 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1869 unsigned int device,
1870 struct pci_dev *related)
1872 struct pci_dev *dev = NULL;
1874 dev = pci_get_device(vendor, device, dev);
1876 if ((dev->bus->number == related->bus->number) &&
1877 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1879 dev = pci_get_device(vendor, device, dev);
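/*
 * Usage sketch (hypothetical 'f2_dev'): find the F3 sibling of a known
 * F2 device on the same bus/slot:
 *
 *	struct pci_dev *f3;
 *
 *	f3 = pci_get_related_function(PCI_VENDOR_ID_AMD,
 *				      PCI_DEVICE_ID_AMD_K8_NB_MISC, f2_dev);
 */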
1886 * syndrome mapping table for ECC ChipKill devices
1888 * The comment in each row is the token (nibble) number that is in error.
1889 * The least significant nibble of the syndrome is the mask for the bits
1890 * that are in error (need to be toggled) for the particular nibble.
1892 * Each row contains 16 entries.
1893 * The first entry (0th) is the channel number for that row of syndromes.
1894 * The remaining 15 entries are the syndromes for the respective Error
1897 * 1st index entry is 0x0001 mask, indicating that the rightmost bit is the
1899 * The 2nd index entry is 0x0010, indicating that the second bit is damaged.
1900 * The 3rd index entry is 0x0011, indicating that the rightmost 2 bits
1902 * And so on until index 15, 0x1111, whose entry has the syndrome
1903 * indicating that all 4 bits are damaged.
1905 * A search is performed on this table looking for a given syndrome.
1907 * See the AMD documentation for ECC syndromes. This ECC table is valid
1908 * across all the versions of the AMD64 processors.
1910 * A fast lookup is to use the LAST four bits of the 16-bit syndrome as a
1911 * COLUMN index, then search all ROWS of that column, looking for a match
1912 * with the input syndrome. The ROW value will be the token number.
1914 * The 0'th entry on that row can be returned as the CHANNEL (0 or 1) of this
1917 #define NUMBER_ECC_ROWS 36
1918 static const unsigned short ecc_chipkill_syndromes[NUMBER_ECC_ROWS][16] = {
1919 /* Channel 0 syndromes */
1920 {/*0*/ 0, 0xe821, 0x7c32, 0x9413, 0xbb44, 0x5365, 0xc776, 0x2f57,
1921 0xdd88, 0x35a9, 0xa1ba, 0x499b, 0x66cc, 0x8eed, 0x1afe, 0xf2df },
1922 {/*1*/ 0, 0x5d31, 0xa612, 0xfb23, 0x9584, 0xc8b5, 0x3396, 0x6ea7,
1923 0xeac8, 0xb7f9, 0x4cda, 0x11eb, 0x7f4c, 0x227d, 0xd95e, 0x846f },
1924 {/*2*/ 0, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
1925 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f },
1926 {/*3*/ 0, 0x2021, 0x3032, 0x1013, 0x4044, 0x6065, 0x7076, 0x5057,
1927 0x8088, 0xa0a9, 0xb0ba, 0x909b, 0xc0cc, 0xe0ed, 0xf0fe, 0xd0df },
1928 {/*4*/ 0, 0x5041, 0xa082, 0xf0c3, 0x9054, 0xc015, 0x30d6, 0x6097,
1929 0xe0a8, 0xb0e9, 0x402a, 0x106b, 0x70fc, 0x20bd, 0xd07e, 0x803f },
1930 {/*5*/ 0, 0xbe21, 0xd732, 0x6913, 0x2144, 0x9f65, 0xf676, 0x4857,
1931 0x3288, 0x8ca9, 0xe5ba, 0x5b9b, 0x13cc, 0xaded, 0xc4fe, 0x7adf },
1932 {/*6*/ 0, 0x4951, 0x8ea2, 0xc7f3, 0x5394, 0x1ac5, 0xdd36, 0x9467,
1933 0xa1e8, 0xe8b9, 0x2f4a, 0x661b, 0xf27c, 0xbb2d, 0x7cde, 0x358f },
1934 {/*7*/ 0, 0x74e1, 0x9872, 0xec93, 0xd6b4, 0xa255, 0x4ec6, 0x3a27,
1935 0x6bd8, 0x1f39, 0xf3aa, 0x874b, 0xbd6c, 0xc98d, 0x251e, 0x51ff },
1936 {/*8*/ 0, 0x15c1, 0x2a42, 0x3f83, 0xcef4, 0xdb35, 0xe4b6, 0xf177,
1937 0x4758, 0x5299, 0x6d1a, 0x78db, 0x89ac, 0x9c6d, 0xa3ee, 0xb62f },
1938 {/*9*/ 0, 0x3d01, 0x1602, 0x2b03, 0x8504, 0xb805, 0x9306, 0xae07,
1939 0xca08, 0xf709, 0xdc0a, 0xe10b, 0x4f0c, 0x720d, 0x590e, 0x640f },
1940 {/*a*/ 0, 0x9801, 0xec02, 0x7403, 0x6b04, 0xf305, 0x8706, 0x1f07,
1941 0xbd08, 0x2509, 0x510a, 0xc90b, 0xd60c, 0x4e0d, 0x3a0e, 0xa20f },
1942 {/*b*/ 0, 0xd131, 0x6212, 0xb323, 0x3884, 0xe9b5, 0x5a96, 0x8ba7,
1943 0x1cc8, 0xcdf9, 0x7eda, 0xafeb, 0x244c, 0xf57d, 0x465e, 0x976f },
1944 {/*c*/ 0, 0xe1d1, 0x7262, 0x93b3, 0xb834, 0x59e5, 0xca56, 0x2b87,
1945 0xdc18, 0x3dc9, 0xae7a, 0x4fab, 0x542c, 0x85fd, 0x164e, 0xf79f },
1946 {/*d*/ 0, 0x6051, 0xb0a2, 0xd0f3, 0x1094, 0x70c5, 0xa036, 0xc067,
1947 0x20e8, 0x40b9, 0x904a, 0x601b, 0x307c, 0x502d, 0x80de, 0xe08f },
1948 {/*e*/ 0, 0xa4c1, 0xf842, 0x5c83, 0xe6f4, 0x4235, 0x1eb6, 0xba77,
1949 0x7b58, 0xdf99, 0x831a, 0x27db, 0x9dac, 0x396d, 0x65ee, 0xc12f },
1950 {/*f*/ 0, 0x11c1, 0x2242, 0x3383, 0xc8f4, 0xd935, 0xeab6, 0xfb77,
1951 0x4c58, 0x5d99, 0x6e1a, 0x7fdb, 0x84ac, 0x956d, 0xa6ee, 0xb72f },
1953 /* Channel 1 syndromes */
1954 {/*10*/ 1, 0x45d1, 0x8a62, 0xcfb3, 0x5e34, 0x1be5, 0xd456, 0x9187,
1955 0xa718, 0xe2c9, 0x2d7a, 0x68ab, 0xf92c, 0xbcfd, 0x734e, 0x369f },
1956 {/*11*/ 1, 0x63e1, 0xb172, 0xd293, 0x14b4, 0x7755, 0xa5c6, 0xc627,
1957 0x28d8, 0x4b39, 0x99aa, 0xfa4b, 0x3c6c, 0x5f8d, 0x8d1e, 0xeeff },
1958 {/*12*/ 1, 0xb741, 0xd982, 0x6ec3, 0x2254, 0x9515, 0xfbd6, 0x4c97,
1959 0x33a8, 0x84e9, 0xea2a, 0x5d6b, 0x11fc, 0xa6bd, 0xc87e, 0x7f3f },
1960 {/*13*/ 1, 0xdd41, 0x6682, 0xbbc3, 0x3554, 0xe815, 0x53d6, 0xce97,
1961 0x1aa8, 0xc7e9, 0x7c2a, 0xa1fb, 0x2ffc, 0xf2bd, 0x497e, 0x943f },
1962 {/*14*/ 1, 0x2bd1, 0x3d62, 0x16b3, 0x4f34, 0x64e5, 0x7256, 0x5987,
1963 0x8518, 0xaec9, 0xb87a, 0x93ab, 0xca2c, 0xe1fd, 0xf74e, 0xdc9f },
1964 {/*15*/ 1, 0x83c1, 0xc142, 0x4283, 0xa4f4, 0x2735, 0x65b6, 0xe677,
1965 0xf858, 0x7b99, 0x391a, 0xbadb, 0x5cac, 0xdf6d, 0x9dee, 0x1e2f },
1966 {/*16*/ 1, 0x8fd1, 0xc562, 0x4ab3, 0xa934, 0x26e5, 0x6c56, 0xe387,
1967 0xfe18, 0x71c9, 0x3b7a, 0xb4ab, 0x572c, 0xd8fd, 0x924e, 0x1d9f },
1968 {/*17*/ 1, 0x4791, 0x89e2, 0xce73, 0x5264, 0x15f5, 0xdb86, 0x9c17,
1969 0xa3b8, 0xe429, 0x2a5a, 0x6dcb, 0xf1dc, 0xb64d, 0x783e, 0x3faf },
1970 {/*18*/ 1, 0x5781, 0xa9c2, 0xfe43, 0x92a4, 0xc525, 0x3b66, 0x6ce7,
1971 0xe3f8, 0xb479, 0x4a3a, 0x1dbb, 0x715c, 0x26dd, 0xd89e, 0x8f1f },
1972 {/*19*/ 1, 0xbf41, 0xd582, 0x6ac3, 0x2954, 0x9615, 0xfcd6, 0x4397,
1973 0x3ea8, 0x81e9, 0xeb2a, 0x546b, 0x17fc, 0xa8bd, 0xc27e, 0x7d3f },
1974 {/*1a*/ 1, 0x9891, 0xe1e2, 0x7273, 0x6464, 0xf7f5, 0x8586, 0x1617,
1975 0xb8b8, 0x2b29, 0x595a, 0xcacb, 0xdcdc, 0x4f4d, 0x3d3e, 0xaeaf },
1976 {/*1b*/ 1, 0xcce1, 0x4472, 0x8893, 0xfdb4, 0x3f55, 0xb9c6, 0x7527,
1977 0x56d8, 0x9a39, 0x12aa, 0xde4b, 0xab6c, 0x678d, 0xef1e, 0x23ff },
1978 {/*1c*/ 1, 0xa761, 0xf9b2, 0x5ed3, 0xe214, 0x4575, 0x1ba6, 0xbcc7,
1979 0x7328, 0xd449, 0x8a9a, 0x2dfb, 0x913c, 0x365d, 0x688e, 0xcfef },
1980 {/*1d*/ 1, 0xff61, 0x55b2, 0xaad3, 0x7914, 0x8675, 0x2ca6, 0xd3c7,
1981 0x9e28, 0x6149, 0xcb9a, 0x34fb, 0xe73c, 0x185d, 0xb28e, 0x4def },
1982 {/*1e*/ 1, 0x5451, 0xa8a2, 0xfcf3, 0x9694, 0xc2c5, 0x3e36, 0x6a67,
1983 0xebe8, 0xbfb9, 0x434a, 0x171b, 0x7d7c, 0x292d, 0xd5de, 0x818f },
1984 {/*1f*/ 1, 0x6fc1, 0xb542, 0xda83, 0x19f4, 0x7635, 0xacb6, 0xc377,
1985 0x2e58, 0x4199, 0x9b1a, 0xf4db, 0x37ac, 0x586d, 0x82ee, 0xed2f },
1987 /* ECC bits are also in the set of tokens and they too can go bad.
1988 * The first 2 cover channel 0, while the second 2 cover channel 1.
1990 {/*20*/ 0, 0xbe01, 0xd702, 0x6903, 0x2104, 0x9f05, 0xf606, 0x4807,
1991 0x3208, 0x8c09, 0xe50a, 0x5b0b, 0x130c, 0xad0d, 0xc40e, 0x7a0f },
1992 {/*21*/ 0, 0x4101, 0x8202, 0xc303, 0x5804, 0x1905, 0xda06, 0x9b07,
1993 0xac08, 0xed09, 0x2e0a, 0x6f0b, 0x640c, 0xb50d, 0x760e, 0x370f },
1994 {/*22*/ 1, 0xc441, 0x4882, 0x8cc3, 0xf654, 0x3215, 0xbed6, 0x7a97,
1995 0x5ba8, 0x9fe9, 0x132a, 0xd76b, 0xadfc, 0x69bd, 0xe57e, 0x213f },
1996 {/*23*/ 1, 0x7621, 0x9b32, 0xed13, 0xda44, 0xac65, 0x4176, 0x3757,
1997 0x6f88, 0x19a9, 0xf4ba, 0x829b, 0xb5cc, 0xc3ed, 0x2efe, 0x58df }
2001 * Given the syndrome argument, scan each of the channel tables for a syndrome
2002 * match. Depending on which table the match is found in, return the channel number.
2004 static int get_channel_from_ecc_syndrome(unsigned short syndrome)
2009 /* Determine column to scan */
2010 column = syndrome & 0xF;
2012 /* Scan all rows, looking for syndrome, or end of table */
2013 for (row = 0; row < NUMBER_ECC_ROWS; row++) {
2014 if (ecc_chipkill_syndromes[row][column] == syndrome)
2015 return ecc_chipkill_syndromes[row][0];
2018 debugf0("syndrome(%x) not found\n", syndrome);
2019 return -1;
2020 }
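/*
 * Editor's note - a worked example of the lookup above, derived from the
 * table as shown (illustrative, not part of the driver): for syndrome
 * 0x45d1, column = 0x45d1 & 0xF = 1. Scanning column 1 finds 0x45d1 in
 * the row marked 10, whose element [0] is 1, so the error is attributed
 * to channel 1. A syndrome matching no row falls through to the debug
 * message and the -1 return above.
 */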
2023 * Check for valid error in the NB Status High register. If so, proceed to read
2024 * NB Status Low, NB Address Low and NB Address High registers and store data
2025 * into error structure.
2028 * - 1: if the hardware regs contain valid error info
2029 * - 0: if no valid error is indicated
2031 static int amd64_get_error_info_regs(struct mem_ctl_info *mci,
2032 struct err_regs *regs)
2034 struct amd64_pvt *pvt;
2035 struct pci_dev *misc_f3_ctl;
2037 pvt = mci->pvt_info;
2038 misc_f3_ctl = pvt->misc_f3_ctl;
2040 if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSH, &regs->nbsh))
2041 return 0;
2043 if (!(regs->nbsh & K8_NBSH_VALID_BIT))
2044 return 0;
2046 /* valid error, read remaining error information registers */
2047 if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSL, &regs->nbsl) ||
2048 amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAL, &regs->nbeal) ||
2049 amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAH, &regs->nbeah) ||
2050 amd64_read_pci_cfg(misc_f3_ctl, K8_NBCFG, &regs->nbcfg))
2051 return 0;
2053 return 1;
2057 * This function is called to retrieve the error data from hardware and store it
2058 * in the info structure.
2061 * - 1: if a valid error is found
2062 * - 0: if no error is found
2064 static int amd64_get_error_info(struct mem_ctl_info *mci,
2065 struct err_regs *info)
2067 struct amd64_pvt *pvt;
2068 struct err_regs regs;
2070 pvt = mci->pvt_info;
2072 if (!amd64_get_error_info_regs(mci, info))
2073 return 0;
2076 * Here's the problem with the K8's EDAC reporting: There are four
2077 * registers which report pieces of error information. They are shared
2078 * between CEs and UEs. Furthermore, contrary to what is stated in the
2079 * BKDG, the overflow bit is never used! Every error always updates the
2080 * reporting registers.
2082 * Can you see the race condition? All four error reporting registers
2083 * must be read before a new error updates them! There is no way to read
2084 * all four registers atomically. The best that can be done is to detect
2085 * that a race has occurred and then report the error without any kind of
2088 * What is still positive is that errors are still reported and thus
2089 * problems can still be detected - just not localized because the
2090 * syndrome and address are spread out across registers.
2092 * Grrrrr!!!!! Here's hoping that AMD fixes this in some future K8 rev.
2093 * UEs and CEs should have separate register sets with proper overflow
2094 * bits that are used! At the very least, the problem could be fixed by
2095 * honoring the ErrValid bit in 'nbsh' and not updating registers - just
2096 * set the overflow bit - unless the current error is CE and the new
2097 * error is UE, which would be the only situation for overwriting the
2098 * current info.
2099 */
2101 regs = *info;
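/*
 * Editor's sketch of the interleaving described above (hypothetical
 * timing, for illustration only):
 *
 *	this driver			northbridge
 *	-----------			-----------
 *	read NBSH (error A)
 *					error B overwrites all four regs
 *	read NBSL (now error B)
 *	read NBEAL (now error B)
 *	read NBEAH (now error B)
 *
 * The double read below cannot prevent the race; it can only detect that
 * the values changed between the two passes and warn instead of reporting
 * a mixed-up error.
 */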
2103 /* Use info from the second read - most current */
2104 if (unlikely(!amd64_get_error_info_regs(mci, info)))
2105 return 0;
2107 /* clear the error bits in hardware */
2108 pci_write_bits32(pvt->misc_f3_ctl, K8_NBSH, 0, K8_NBSH_VALID_BIT);
2110 /* Check for the possible race condition */
2111 if ((regs.nbsh != info->nbsh) ||
2112 (regs.nbsl != info->nbsl) ||
2113 (regs.nbeah != info->nbeah) ||
2114 (regs.nbeal != info->nbeal)) {
2115 amd64_mc_printk(mci, KERN_WARNING,
2116 "hardware STATUS read access race condition "
2117 "detected!\n");
2118 return 0;
2119 }
2121 return 1;
2122 }
2124 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
2125 * ADDRESS and process.
2127 static void amd64_handle_ce(struct mem_ctl_info *mci,
2128 struct err_regs *info)
2130 struct amd64_pvt *pvt = mci->pvt_info;
2133 /* Ensure that the Error Address is VALID */
2134 if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
2135 amd64_mc_printk(mci, KERN_ERR,
2136 "HW has no ERROR_ADDRESS available\n");
2137 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
2138 return;
2139 }
2141 SystemAddress = extract_error_address(mci, info);
2143 amd64_mc_printk(mci, KERN_ERR,
2144 "CE ERROR_ADDRESS= 0x%llx\n", SystemAddress);
2146 pvt->ops->map_sysaddr_to_csrow(mci, info, SystemAddress);
2149 /* Handle any Un-correctable Errors (UEs) */
2150 static void amd64_handle_ue(struct mem_ctl_info *mci,
2151 struct err_regs *info)
2156 struct mem_ctl_info *log_mci, *src_mci = NULL;
2158 log_mci = mci;
2160 if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
2161 amd64_mc_printk(mci, KERN_CRIT,
2162 "HW has no ERROR_ADDRESS available\n");
2163 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
2164 return;
2165 }
2167 SystemAddress = extract_error_address(mci, info);
2170 * Find out which node the error address belongs to. This may be
2171 * different from the node that detected the error.
2173 src_mci = find_mc_by_sys_addr(mci, SystemAddress);
2174 if (!src_mci) {
2175 amd64_mc_printk(mci, KERN_CRIT,
2176 "ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
2177 (unsigned long)SystemAddress);
2178 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
2179 return;
2180 }
2182 log_mci = src_mci;
2184 csrow = sys_addr_to_csrow(log_mci, SystemAddress);
2185 if (csrow < 0) {
2186 amd64_mc_printk(mci, KERN_CRIT,
2187 "ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
2188 (unsigned long)SystemAddress);
2189 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
2190 } else {
2191 error_address_to_page_and_offset(SystemAddress, &page, &offset);
2192 edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
2193 }
2196 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
2197 struct err_regs *info)
2199 u32 ec = ERROR_CODE(info->nbsl);
2200 u32 xec = EXT_ERROR_CODE(info->nbsl);
2201 int ecc_type = (info->nbsh >> 13) & 0x3;
2203 /* Bail out early if this was an 'observed' error */
2204 if (PP(ec) == K8_NBSL_PP_OBS)
2205 return;
2207 /* Do only ECC errors */
2208 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2209 return;
2211 if (ecc_type == 2)
2212 amd64_handle_ce(mci, info);
2213 else if (ecc_type == 1)
2214 amd64_handle_ue(mci, info);
2217 * If the main error is a CE then the overflow must be a CE. If the
2218 * main error is a UE then the overflow is unknown. We'll call the
2219 * overflow a CE - if panic_on_ue is set then we're already panicked
2220 * and won't arrive here. Else, apparently someone doesn't think that
2221 * UEs are worth panicking for.
2222 */
2223 if (info->nbsh & K8_NBSH_OVERFLOW)
2224 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR "Error Overflow");
2227 void amd64_decode_bus_error(int node_id, struct err_regs *regs)
2229 struct mem_ctl_info *mci = mci_lookup[node_id];
2231 __amd64_decode_bus_error(mci, regs);
2234 * Check the UE bit of the NB status high register; if set, generate some
2235 * logs. If NOT a GART error, then process the event as a NO-INFO event.
2236 * If it was a GART error, skip that process.
2238 * FIXME: this should go somewhere else, if at all.
2240 if (regs->nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
2241 edac_mc_handle_ue_no_info(mci, "UE bit is set");
2246 * The main polling 'check' function, called FROM the edac core to perform
2247 * error checking and, if an error is encountered, error processing.
2249 static void amd64_check(struct mem_ctl_info *mci)
2251 struct err_regs regs;
2253 if (amd64_get_error_info(mci, &regs)) {
2254 struct amd64_pvt *pvt = mci->pvt_info;
2255 amd_decode_nb_mce(pvt->mc_node_id, &regs, 1);
2261 * 1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer
2262 * 2) AMD Family index value
2265 * Upon return of 0, the following are filled in:
2267 * struct pvt->addr_f1_ctl
2268 * struct pvt->misc_f3_ctl
2270 * Filled in with the related device functions of 'dram_f2_ctl'.
2271 * These devices are "reserved" via the pci_get_device()
2273 * Upon return of 1 (error status):
2277 static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx)
2279 const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx];
2281 /* Reserve the ADDRESS MAP Device */
2282 pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
2283 amd64_dev->addr_f1_ctl,
2284 pvt->dram_f2_ctl);
2286 if (!pvt->addr_f1_ctl) {
2287 amd64_printk(KERN_ERR, "error address map device not found: "
2288 "vendor %x device 0x%x (broken BIOS?)\n",
2289 PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl);
2290 return 1;
2291 }
2293 /* Reserve the MISC Device */
2294 pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
2295 amd64_dev->misc_f3_ctl,
2296 pvt->dram_f2_ctl);
2298 if (!pvt->misc_f3_ctl) {
2299 pci_dev_put(pvt->addr_f1_ctl);
2300 pvt->addr_f1_ctl = NULL;
2302 amd64_printk(KERN_ERR, "error miscellaneous device not found: "
2303 "vendor %x device 0x%x (broken BIOS?)\n",
2304 PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl);
2305 return 1;
2306 }
2308 debugf1(" Addr Map device PCI Bus ID:\t%s\n",
2309 pci_name(pvt->addr_f1_ctl));
2310 debugf1(" DRAM MEM-CTL PCI Bus ID:\t%s\n",
2311 pci_name(pvt->dram_f2_ctl));
2312 debugf1(" Misc device PCI Bus ID:\t%s\n",
2313 pci_name(pvt->misc_f3_ctl));
2315 return 0;
2316 }
2318 static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
2320 pci_dev_put(pvt->addr_f1_ctl);
2321 pci_dev_put(pvt->misc_f3_ctl);
2325 * Retrieve the hardware registers of the memory controller (this includes the
2326 * 'Address Map' and 'Misc' device regs)
2328 static void amd64_read_mc_registers(struct amd64_pvt *pvt)
2334 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2335 * those are Read-As-Zero
2337 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2338 debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);
2340 /* check first whether TOP_MEM2 is enabled */
2341 rdmsrl(MSR_K8_SYSCFG, msr_val);
2342 if (msr_val & (1U << 21)) {
2343 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2344 debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2346 debugf0(" TOP_MEM2 disabled.\n");
2348 amd64_cpu_display_info(pvt);
2350 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
2352 if (pvt->ops->read_dram_ctl_register)
2353 pvt->ops->read_dram_ctl_register(pvt);
2355 for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
2357 * Call CPU specific READ function to get the DRAM Base and
2358 * Limit values from the DCT.
2360 pvt->ops->read_dram_base_limit(pvt, dram);
2363 * Only print out debug info on rows with both R and W Enabled.
2364 * In normal processing, the compiler should optimize this whole 'if'
2365 * debug output block away.
2367 if (pvt->dram_rw_en[dram] != 0) {
2368 debugf1(" DRAM-BASE[%d]: 0x%016llx "
2369 "DRAM-LIMIT: 0x%016llx\n",
2371 pvt->dram_base[dram],
2372 pvt->dram_limit[dram]);
2374 debugf1(" IntlvEn=%s %s %s "
2375 "IntlvSel=%d DstNode=%d\n",
2376 pvt->dram_IntlvEn[dram] ?
2377 "Enabled" : "Disabled",
2378 (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
2379 (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
2380 pvt->dram_IntlvSel[dram],
2381 pvt->dram_DstNode[dram]);
2385 amd64_read_dct_base_mask(pvt);
2387 amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
2388 amd64_read_dbam_reg(pvt);
2390 amd64_read_pci_cfg(pvt->misc_f3_ctl,
2391 F10_ONLINE_SPARE, &pvt->online_spare);
2393 amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
2394 amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
2396 if (!dct_ganging_enabled(pvt)) {
2397 amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
2398 amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1);
2400 amd64_dump_misc_regs(pvt);
2404 * NOTE: CPU Revision Dependent code
2407 * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
2408 * k8 private pointer to -->
2409 * DRAM Bank Address mapping register
2411 * DCL register where dual_channel_active is
2413 * The DBAM register consists of 4 sets of 4 bits each; their definitions:
2415 * Bits: CSROWs
2416 * 0-3 CSROWs 0 and 1
2417 * 4-7 CSROWs 2 and 3
2418 * 8-11 CSROWs 4 and 5
2419 * 12-15 CSROWs 6 and 7
2421 * Values range from: 0 to 15
2422 * The meaning of the values depends on CPU revision and dual-channel state;
2423 * see the relevant BKDG for more info.
2425 * The memory controller provides for a total of only 8 CSROWs in its current
2426 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2427 * single channel or two (2) DIMMs in dual channel mode.
2429 * The following code logic collapses the various tables for CSROW based on CPU
2430 * revision.
2432 * Returns:
2433 * The number of PAGE_SIZE pages on the specified CSROW number it
2434 * amounts to.
2437 static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
2439 u32 dram_map, nr_pages;
2442 * The math on this doesn't look right on the surface because x/2*4 can
2443 * be simplified to x*2, but this expression makes use of the fact that
2444 * it is integer math, where 1/2=0. This intermediate value becomes the
2445 * number of bits to shift the DBAM register to extract the proper CSROW
2446 * field.
2447 */
2448 dram_map = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
2450 nr_pages = pvt->ops->dbam_map_to_pages(pvt, dram_map);
2453 * If dual channel, then double the single-channel memory size.
2454 * Channel count is 1 or 2
2456 nr_pages <<= (pvt->channel_count - 1);
2458 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, dram_map);
2459 debugf0(" nr_pages= %u channel-count = %d\n",
2460 nr_pages, pvt->channel_count);
2462 return nr_pages;
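/*
 * Editor's worked example of the shift math above (illustrative): for
 * csrow_nr = 5, (5 / 2) * 4 = 8 in integer math, so DBAM is shifted
 * right by 8 bits and masked with 0xF. That extracts bits 8-11, which
 * per the table above cover CSROWs 4 and 5. With channel_count = 2,
 * nr_pages is then doubled by the shift-left above.
 */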
2466 * Initialize the array of csrow attribute instances, based on the values
2467 * from pci config hardware registers.
2469 static int amd64_init_csrows(struct mem_ctl_info *mci)
2471 struct csrow_info *csrow;
2472 struct amd64_pvt *pvt;
2473 u64 input_addr_min, input_addr_max, sys_addr;
2476 pvt = mci->pvt_info;
2478 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
2480 debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
2481 (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
2482 (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
2483 );
2485 for (i = 0; i < pvt->cs_count; i++) {
2486 csrow = &mci->csrows[i];
2488 if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
2489 debugf1("----CSROW %d EMPTY for node %d\n", i,
2490 pvt->mc_node_id);
2491 continue;
2492 }
2494 debugf1("----CSROW %d VALID for MC node %d\n",
2495 i, pvt->mc_node_id);
2498 csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
2499 find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
2500 sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
2501 csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
2502 sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
2503 csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
2504 csrow->page_mask = ~mask_from_dct_mask(pvt, i);
2505 /* 8 bytes of resolution */
2506 csrow->grain = 8;
2507 csrow->mtype = amd64_determine_memory_type(pvt);
2509 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2510 debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
2511 (unsigned long)input_addr_min,
2512 (unsigned long)input_addr_max);
2513 debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
2514 (unsigned long)sys_addr, csrow->page_mask);
2515 debugf1(" nr_pages: %u first_page: 0x%lx "
2516 "last_page: 0x%lx\n",
2517 (unsigned)csrow->nr_pages,
2518 csrow->first_page, csrow->last_page);
2521 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2523 if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
2524 csrow->edac_mode =
2525 (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
2526 EDAC_S4ECD4ED : EDAC_SECDED;
2527 else
2528 csrow->edac_mode = EDAC_NONE;
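/*
 * Editor's summary of the mode selection above (derived from this code,
 * not from the BKDG):
 *
 *	NBCFG[ECC_ENABLE]	NBCFG[CHIPKILL]		csrow->edac_mode
 *	1			1			EDAC_S4ECD4ED
 *	1			0			EDAC_SECDED
 *	0			x			EDAC_NONE
 */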
2534 /* get all cores on this DCT */
2535 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
2539 for_each_online_cpu(cpu)
2540 if (amd_get_nb_id(cpu) == nid)
2541 cpumask_set_cpu(cpu, mask);
2544 /* check MCG_CTL on all the cpus on this node */
2545 static bool amd64_nb_mce_bank_enabled_on_node(int nid)
2549 int cpu, nbe, idx = 0;
2552 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2553 amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
2554 __func__);
2555 return false;
2556 }
2558 get_cpus_on_this_dct_cpumask(mask, nid);
2560 msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
2561 if (!msrs) {
2562 amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
2563 __func__);
2564 free_cpumask_var(mask);
2565 return false;
2566 }
2568 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2570 for_each_cpu(cpu, mask) {
2571 nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
2573 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2574 cpu, msrs[idx].q,
2575 (nbe ? "enabled" : "disabled"));
2586 free_cpumask_var(mask);
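/*
 * Editor's note - a hypothetical example of the check above: if node 1
 * owns cores 4-7 and core 6 has MCG_CTL[4] (the NBE bit) cleared, NB MCE
 * reporting is not reliably enabled on that node, and
 * amd64_check_ecc_enabled() below will warn that the bank is disabled.
 */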
2590 static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
2592 cpumask_var_t cmask;
2593 struct msr *msrs = NULL;
2596 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2597 amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
2598 __func__);
2599 return false;
2600 }
2602 get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
2604 msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL);
2605 if (!msrs) {
2606 amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
2607 __func__);
2608 free_cpumask_var(cmask);
2609 return false;
2610 }
2611 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2613 for_each_cpu(cpu, cmask) {
2614 if (on) {
2616 if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
2617 pvt->flags.ecc_report = 1;
2619 msrs[idx].l |= K8_MSR_MCGCTL_NBE;
2620 } else {
2621 /*
2622 * Turn off ECC reporting only when it was off before
2623 */
2624 if (!pvt->flags.ecc_report)
2625 msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
2629 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2632 free_cpumask_var(cmask);
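/*
 * Editor's note on the loop above: when enabling, MCGCTL[NBE] is set on
 * every core of the node, and pvt->flags.ecc_report records whether some
 * core already had it set (i.e. the BIOS enabled it). When disabling,
 * the bit is cleared again only if the BIOS had not already enabled it.
 */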
2638 * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
2639 * enable it here.
2640 */
2641 static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
2643 struct amd64_pvt *pvt = mci->pvt_info;
2644 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2646 if (!ecc_enable_override)
2647 return;
2649 amd64_printk(KERN_WARNING,
2650 "'ecc_enable_override' parameter is active, "
2651 "Enabling AMD ECC hardware now: CAUTION\n");
2653 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
2655 /* turn on UECCEn and CECCEn bits */
2656 pvt->old_nbctl = value & mask;
2657 pvt->nbctl_mcgctl_saved = 1;
2659 value |= mask;
2660 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
2662 if (amd64_toggle_ecc_err_reporting(pvt, ON))
2663 amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
2664 "MCGCTL!\n");
2666 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
2668 debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
2669 (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
2670 (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
2672 if (!(value & K8_NBCFG_ECC_ENABLE)) {
2673 amd64_printk(KERN_WARNING,
2674 "This node reports that DRAM ECC is "
2675 "currently Disabled; ENABLING now\n");
2677 /* Attempt to turn on DRAM ECC Enable */
2678 value |= K8_NBCFG_ECC_ENABLE;
2679 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
2681 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
2683 if (!(value & K8_NBCFG_ECC_ENABLE)) {
2684 amd64_printk(KERN_WARNING,
2685 "Hardware rejects Enabling DRAM ECC checking\n"
2686 "Check memory DIMM configuration\n");
2687 } else {
2688 amd64_printk(KERN_DEBUG,
2689 "Hardware accepted DRAM ECC Enable\n");
2692 debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
2693 (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
2694 (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
2696 pvt->ctl_error_info.nbcfg = value;
2699 static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
2701 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2703 if (!pvt->nbctl_mcgctl_saved)
2704 return;
2706 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
2707 value &= ~mask;
2708 value |= pvt->old_nbctl;
2710 /* restore the NB Enable MCGCTL bit */
2711 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
2713 if (amd64_toggle_ecc_err_reporting(pvt, OFF))
2714 amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
2715 "MCGCTL!\n");
2719 * EDAC requires that the BIOS have ECC enabled before taking over the
2720 * processing of ECC errors. This is because the BIOS can properly initialize
2721 * the memory system completely. A command line option allows force-enabling
2722 * hardware ECC later in amd64_enable_ecc_error_reporting().
2724 static const char *ecc_warning =
2725 "WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n"
2726 " Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n"
2727 " Also, use of the override can cause unknown side effects.\n";
2729 static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
2733 bool nb_mce_en = false;
2735 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
2737 ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
2738 if (!ecc_enabled)
2739 amd64_printk(KERN_WARNING, "This node reports that Memory ECC "
2740 "is currently disabled, set F3x%x[22] (%s).\n",
2741 K8_NBCFG, pci_name(pvt->misc_f3_ctl));
2742 else
2743 amd64_printk(KERN_INFO, "ECC is enabled by BIOS.\n");
2745 nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
2746 if (!nb_mce_en)
2747 amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR "
2748 "0x%08x[4] on node %d to enable.\n",
2749 MSR_IA32_MCG_CTL, pvt->mc_node_id);
2751 if (!ecc_enabled || !nb_mce_en) {
2752 if (!ecc_enable_override) {
2753 amd64_printk(KERN_WARNING, "%s", ecc_warning);
2754 return -ENODEV;
2755 }
2756 } else
2757 /* CLEAR the override, since BIOS controlled it */
2758 ecc_enable_override = 0;
2763 struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
2764 ARRAY_SIZE(amd64_inj_attrs) +
2765 1];
2767 struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
2769 static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
2771 unsigned int i = 0, j = 0;
2773 for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
2774 sysfs_attrs[i] = amd64_dbg_attrs[i];
2776 for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
2777 sysfs_attrs[i] = amd64_inj_attrs[j];
2779 sysfs_attrs[i] = terminator;
2781 mci->mc_driver_sysfs_attributes = sysfs_attrs;
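/*
 * Editor's note: after the two copy loops above, sysfs_attrs[] holds all
 * amd64_dbg_attrs entries, then all amd64_inj_attrs entries, then the
 * NULL-named terminator - which is why the array is sized with one extra
 * slot.
 */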
2784 static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
2786 struct amd64_pvt *pvt = mci->pvt_info;
2788 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2789 mci->edac_ctl_cap = EDAC_FLAG_NONE;
2791 if (pvt->nbcap & K8_NBCAP_SECDED)
2792 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2794 if (pvt->nbcap & K8_NBCAP_CHIPKILL)
2795 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2797 mci->edac_cap = amd64_determine_edac_cap(pvt);
2798 mci->mod_name = EDAC_MOD_STR;
2799 mci->mod_ver = EDAC_AMD64_VERSION;
2800 mci->ctl_name = get_amd_family_name(pvt->mc_type_index);
2801 mci->dev_name = pci_name(pvt->dram_f2_ctl);
2802 mci->ctl_page_to_phys = NULL;
2804 /* IMPORTANT: Set the polling 'check' function in this module */
2805 mci->edac_check = amd64_check;
2807 /* memory scrubber interface */
2808 mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
2809 mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
2813 * Init stuff for this DRAM Controller device.
2815 * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration
2816 * Space feature MUST be enabled on ALL Processors prior to actually reading
2817 * from the ECS registers. The loading of the module can occur on any 'core',
2818 * and cores don't 'see' the other processors' ECS data when ECS is NOT
2819 * enabled on those others. Our solution is to first enable ECS access in this
2820 * routine on all processors, gather some data in an amd64_pvt structure and
2821 * later come back in a finish-setup function to perform the final
2822 * initialization. See also amd64_init_2nd_stage() for that.
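/*
 * Editor's sketch of the resulting two-stage call flow (assumed from the
 * code in this file, simplified):
 *
 *	amd64_edac_init()
 *	  pci_register_driver()
 *	    amd64_init_one_instance()		(probe, per F2 device)
 *	      amd64_probe_one_instance()	stage 1: reserve sibling
 *						devices, check ECC, enable
 *						ECS access, stash the pvt
 *	  amd64_init_2nd_stage()		(per node) stage 2: read MC
 *						registers, register the mci
 */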
2824 static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
2827 struct amd64_pvt *pvt = NULL;
2831 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
2835 pvt->mc_node_id = get_node_id(dram_f2_ctl);
2837 pvt->dram_f2_ctl = dram_f2_ctl;
2838 pvt->ext_model = boot_cpu_data.x86_model >> 4;
2839 pvt->mc_type_index = mc_type_index;
2840 pvt->ops = family_ops(mc_type_index);
2843 * We have the dram_f2_ctl device as an argument, now go reserve its
2844 * sibling devices from the PCI system.
2847 err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index);
2852 err = amd64_check_ecc_enabled(pvt);
2857 * Key operation here: setup of HW prior to performing ops on it. Some
2858 * setup is required to access ECS data. After this is performed, the
2859 * 'teardown' function must be called upon error and normal exit paths.
2861 if (boot_cpu_data.x86 >= 0x10)
2862 amd64_setup(pvt);
2865 * Save the pointer to the private data for use in 2nd initialization
2868 pvt_lookup[pvt->mc_node_id] = pvt;
2873 amd64_free_mc_sibling_devices(pvt);
2883 * This is the finishing stage of the init code. Needs to be performed after
2884 * all MCs' hardware has been prepped for accessing extended config space.
2886 static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
2888 int node_id = pvt->mc_node_id;
2889 struct mem_ctl_info *mci;
2892 amd64_read_mc_registers(pvt);
2895 if (pvt->ops->probe_valid_hardware) {
2896 err = pvt->ops->probe_valid_hardware(pvt);
2902 * We need to determine how many memory channels there are. Then use
2903 * that information for calculating the size of the dynamic instance
2904 * tables in the 'mci' structure
2906 pvt->channel_count = pvt->ops->early_channel_count(pvt);
2907 if (pvt->channel_count < 0)
2911 mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
2915 mci->pvt_info = pvt;
2917 mci->dev = &pvt->dram_f2_ctl->dev;
2918 amd64_setup_mci_misc_attributes(mci);
2920 if (amd64_init_csrows(mci))
2921 mci->edac_cap = EDAC_FLAG_NONE;
2923 amd64_enable_ecc_error_reporting(mci);
2924 amd64_set_mc_sysfs_attributes(mci);
2927 if (edac_mc_add_mc(mci)) {
2928 debugf1("failed edac_mc_add_mc()\n");
2932 mci_lookup[node_id] = mci;
2933 pvt_lookup[node_id] = NULL;
2935 /* register stuff with EDAC MCE */
2936 if (report_gart_errors)
2937 amd_report_gart_errors(true);
2939 amd_register_ecc_decoder(amd64_decode_bus_error);
2947 debugf0("failure to init 2nd stage: ret=%d\n", ret);
2949 amd64_restore_ecc_error_reporting(pvt);
2951 if (boot_cpu_data.x86 > 0xf)
2952 amd64_teardown(pvt);
2954 amd64_free_mc_sibling_devices(pvt);
2956 kfree(pvt_lookup[pvt->mc_node_id]);
2957 pvt_lookup[node_id] = NULL;
2963 static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
2964 const struct pci_device_id *mc_type)
2968 debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev),
2969 get_amd_family_name(mc_type->driver_data));
2971 ret = pci_enable_device(pdev);
2975 ret = amd64_probe_one_instance(pdev, mc_type->driver_data);
2978 debugf0("ret=%d\n", ret);
2983 static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
2985 struct mem_ctl_info *mci;
2986 struct amd64_pvt *pvt;
2988 /* Remove from EDAC CORE tracking list */
2989 mci = edac_mc_del_mc(&pdev->dev);
2990 if (!mci)
2991 return;
2993 pvt = mci->pvt_info;
2995 amd64_restore_ecc_error_reporting(pvt);
2997 if (boot_cpu_data.x86 > 0xf)
2998 amd64_teardown(pvt);
3000 amd64_free_mc_sibling_devices(pvt);
3003 mci->pvt_info = NULL;
3005 mci_lookup[pvt->mc_node_id] = NULL;
3007 /* unregister from EDAC MCE */
3008 amd_report_gart_errors(false);
3009 amd_unregister_ecc_decoder(amd64_decode_bus_error);
3011 /* Free the EDAC CORE resources */
3016 * This table is part of the interface for loading drivers for PCI devices. The
3017 * PCI core identifies what devices are on a system during boot, and then
3018 * consults this table to see if this driver handles a given device it found.
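/*
 * Editor's illustration (hypothetical device): a northbridge function
 * with vendor 0x1022 (PCI_VENDOR_ID_AMD) and a device id equal to
 * PCI_DEVICE_ID_AMD_10H_NB_DRAM matches the second entry below, so the
 * PCI core probes it via amd64_init_one_instance() with
 * driver_data = F10_CPUS.
 */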
3020 static const struct pci_device_id amd64_pci_table[] __devinitdata = {
3022 .vendor = PCI_VENDOR_ID_AMD,
3023 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
3024 .subvendor = PCI_ANY_ID,
3025 .subdevice = PCI_ANY_ID,
3028 .driver_data = K8_CPUS
3031 .vendor = PCI_VENDOR_ID_AMD,
3032 .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
3033 .subvendor = PCI_ANY_ID,
3034 .subdevice = PCI_ANY_ID,
3037 .driver_data = F10_CPUS
3040 .vendor = PCI_VENDOR_ID_AMD,
3041 .device = PCI_DEVICE_ID_AMD_11H_NB_DRAM,
3042 .subvendor = PCI_ANY_ID,
3043 .subdevice = PCI_ANY_ID,
3046 .driver_data = F11_CPUS
3050 MODULE_DEVICE_TABLE(pci, amd64_pci_table);
3052 static struct pci_driver amd64_pci_driver = {
3053 .name = EDAC_MOD_STR,
3054 .probe = amd64_init_one_instance,
3055 .remove = __devexit_p(amd64_remove_one_instance),
3056 .id_table = amd64_pci_table,
3059 static void amd64_setup_pci_device(void)
3061 struct mem_ctl_info *mci;
3062 struct amd64_pvt *pvt;
3067 mci = mci_lookup[0];
3070 pvt = mci->pvt_info;
3072 amd64_ctl_pci = edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev,
3073 EDAC_MOD_STR);
3075 if (!amd64_ctl_pci) {
3076 pr_warning("%s(): Unable to create PCI control\n",
3077 __func__);
3079 pr_warning("%s(): PCI error report via EDAC not set\n",
3080 __func__);
3085 static int __init amd64_edac_init(void)
3087 int nb, err = -ENODEV;
3089 edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
3093 if (cache_k8_northbridges() < 0)
3094 return err;
3096 err = pci_register_driver(&amd64_pci_driver);
3101 * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
3102 * amd64_pvt structs. These will be used in the 2nd stage init function
3103 * to finish initialization of the MC instances.
3105 for (nb = 0; nb < num_k8_northbridges; nb++) {
3106 if (!pvt_lookup[nb])
3107 continue;
3109 err = amd64_init_2nd_stage(pvt_lookup[nb]);
3110 if (err)
3111 goto err_2nd_stage;
3114 amd64_setup_pci_device();
3116 return 0;
3118 err_2nd_stage:
3119 debugf0("2nd stage failed\n");
3120 pci_unregister_driver(&amd64_pci_driver);
3122 return err;
3125 static void __exit amd64_edac_exit(void)
3127 if (amd64_ctl_pci)
3128 edac_pci_release_generic_ctl(amd64_ctl_pci);
3130 pci_unregister_driver(&amd64_pci_driver);
3133 module_init(amd64_edac_init);
3134 module_exit(amd64_edac_exit);
3136 MODULE_LICENSE("GPL");
3137 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
3138 "Dave Peterson, Thayne Harbaugh");
3139 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
3140 EDAC_AMD64_VERSION);
3142 module_param(edac_op_state, int, 0444);
3143 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");