#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *amd64_ctl_pci;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/* Lookup table for all possible MC control instances */
static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];

/*
 * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
 * later.
 */
static int ddr2_dbam_revCG[] = {
static int ddr2_dbam_revD[] = {
static int ddr2_dbam[] = { [0] = 128,
static int ddr3_dbam[] = { [0] = -1,

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 *
 * FIXME: Produce a better mapping/linearisation.
 */
struct scrubrate scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x00, 0UL},		/* scrubbing off */

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */
/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, then use the last maximum value
 * found.
 */
static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater than or equal to the setting
	 * requested and program that. If at last entry, turn off DRAM scrubbing.
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;

		/*
		 * if no suitable bandwidth found, turn off DRAM scrubbing
		 * entirely by falling back to the last element in the
		 * scrubrates array.
		 */
	}

	scrubval = scrubrates[i].scrubval;

	if (scrubval)
		edac_printk(KERN_DEBUG, EDAC_MC,
			    "Setting scrub rate bandwidth: %u\n",
			    scrubrates[i].bandwidth);
	else
		edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n");

	pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);

	return 0;
}
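/*
 * Illustrative example, using only the table entries visible above: a request
 * of new_bw = 2000000000 bytes/s stops at the first entry whose bandwidth is
 * <= new_bw, i.e. { 0x01, 1600000000UL }, so scrubval = 0x01 is written to
 * the low 5 bits of F3x58 (K8_SCRCTRL).
 */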
static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	return __amd64_set_scrub_rate(pvt->misc_f3_ctl, bw, pvt->min_scrubrate);
}

static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);

	scrubval = scrubval & 0x001F;

	edac_printk(KERN_DEBUG, EDAC_MC,
		    "pci-read, sdram scrub control value: %d\n", scrubval);

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			*bw = scrubrates[i].bandwidth;
/* Map from a CSROW entry to the mask entry that operates on it */
static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
{
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
		return csrow;
	else
		return csrow >> 1;
}

/* return the 'base' address for the i'th CS entry of the 'dct' DRAM controller */
static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
{
	if (dct == 0)
		return pvt->dcsb0[csrow];
	else
		return pvt->dcsb1[csrow];
}

/*
 * Return the 'mask' address for the i'th CS entry. This function is needed
 * because the number of DCSM registers on Rev E and prior vs Rev F and later
 * is different.
 */
static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
{
	if (dct == 0)
		return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
	else
		return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
}
/*
 * In *base and *limit, pass back the full 40-bit base and limit physical
 * addresses for the node given by node_id. This information is obtained from
 * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
 * base and limit addresses are of type SysAddr, as defined at the start of
 * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
 * in the address range they represent.
 */
static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
				     u64 *base, u64 *limit)
{
	*base = pvt->dram_base[node_id];
	*limit = pvt->dram_limit[node_id];
}

/*
 * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
 * with the node given by node_id.
 */
static int amd64_base_limit_match(struct amd64_pvt *pvt,
				  u64 sys_addr, int node_id)
{
	u64 base, limit, addr;

	amd64_get_base_and_limit(pvt, node_id, &base, &limit);

	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return (addr >= base) && (addr <= limit);
}
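/*
 * Illustrative example of the 40-bit truncation above: a sign-extended
 * SysAddr of 0xffffff8000000000 becomes 0x8000000000 after the mask, and
 * that 40-bit value is what gets compared against the base/limit pair.
 */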
/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = pvt->dram_IntlvEn[0];

	for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
		if (amd64_base_limit_match(pvt, sys_addr, node_id))

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
			     "IntlvEn field of DRAM Base Register for node 0: "
			     "this probably indicates a BIOS bug.\n", intlv_en);

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_REG_COUNT)

	/* sanity test for sys_addr */
	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
		amd64_printk(KERN_WARNING,
			     "%s(): sys_addr 0x%llx falls outside base/limit "
			     "address range for node %d with node interleaving "
			     __func__, sys_addr, node_id);

	return edac_mc_find(node_id);

	debugf2("sys_addr 0x%lx doesn't match any node\n",
		(unsigned long)sys_addr);

	return NULL;
}
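/*
 * Illustrative example: with intlv_en = 0x01 (2-node interleave), bit 12 of
 * the SysAddr is compared against each node's IntlvSel field, so even 4K
 * pages go to the node whose IntlvSel is 0 and odd ones to the node whose
 * IntlvSel is 1.
 */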
/*
 * Extract the DRAM CS base address from the selected csrow register.
 */
static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
{
	return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
				pvt->dcs_shift;
}

/*
 * Extract the mask from the dcsm0[csrow] entry in a CPU revision-specific way.
 */
static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
{
	u64 dcsm_bits, other_bits;
	u64 mask;

	/* Extract bits from DRAM CS Mask. */
	dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;

	other_bits = pvt->dcsm_mask;
	other_bits = ~(other_bits << pvt->dcs_shift);

	/*
	 * The extracted bits from DCSM belong in the spaces represented by
	 * the cleared bits in other_bits.
	 */
	mask = (dcsm_bits << pvt->dcs_shift) | other_bits;

	return mask;
}
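/*
 * Illustrative example (hypothetical values): a dcsm_bits value of 0x3 with
 * dcs_shift = 8 contributes 0x300 to the mask, while other_bits keeps every
 * bit position outside the shifted dcsm_mask window set to 1, so those
 * positions never constrain the base/InputAddr comparison.
 */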
/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;

	/*
	 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
	 * base/mask register pair, test the condition shown near the start of
	 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
	 */
	for (csrow = 0; csrow < pvt->cs_count; csrow++) {

		/* This DRAM chip select is disabled on this node */
		if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
			continue;

		base = base_from_dct_base(pvt, csrow);
		mask = ~mask_from_dct_mask(pvt, csrow);

		if ((input_addr & mask) == (base & mask)) {
			debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
				(unsigned long)input_addr, csrow,
				pvt->mc_node_id);

			return csrow;
		}
	}

	debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		(unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}

/*
 * Return the base value defined by the DRAM Base register for the node
 * represented by mci. This function returns the full 40-bit value despite the
 * fact that the register only stores bits 39-24 of the value. See section
 * 3.4.4.1 (BKDG #26094, K8, revA-E)
 */
static inline u64 get_dram_base(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	return pvt->dram_base[pvt->mc_node_id];
}

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 base;

	/* only revE and later have the DRAM Hole Address Register */
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
		debugf1("  revision %d for node %d does not support DHAR\n",
			pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* only valid for Fam10h */
	if (boot_cpu_data.x86 == 0x10 &&
	    (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
		debugf1("  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if ((pvt->dhar & DHAR_VALID) == 0) {
		debugf1("  Dram Memory Hoisting is DISABLED on this node %d\n",
			pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |  (0xffffffff-x))]  |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */
	base = dhar_base(pvt->dhar);

	*hole_base = base;
	*hole_size = (0x1ull << 32) - base;

	if (boot_cpu_data.x86 > 0xf)
		*hole_offset = f10_dhar_offset(pvt->dhar);
	else
		*hole_offset = k8_dhar_offset(pvt->dhar);

	debugf1("  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		pvt->mc_node_id, (unsigned long)*hole_base,
		(unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
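/*
 * Worked example of the hoisting arithmetic above (hypothetical DHAR
 * contents): with a hole base of 0xc0000000, *hole_size becomes
 * (1ULL << 32) - 0xc0000000 = 0x40000000 (1 GB), i.e. the last gigabyte
 * below 4 GB is relocated to start at 0x100000000.
 */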
/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the SysAddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;

	dram_base = get_dram_base(mci);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ull << 32)) &&
		    (sys_addr < ((1ull << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			debugf2("using DHAR to translate SysAddr 0x%lx to "
				"DramAddr 0x%lx\n",
				(unsigned long)sys_addr,
				(unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & 0xffffffffffull) - dram_base;

	debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
		"DramAddr 0x%lx\n", (unsigned long)sys_addr,
		(unsigned long)dram_addr);

	return dram_addr;
}

/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
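/*
 * Example of the table lookup above: IntlvEn = 3 (0b011) means SysAddr bits
 * 13:12 select among four interleaved nodes, so num_node_interleave_bits(3)
 * returns 2; IntlvEn = 0 disables interleaving and returns 0.
 */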
/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
	input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
		     (dram_addr & 0xfff);

	debugf2("  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		intlv_shift, (unsigned long)dram_addr,
		(unsigned long)input_addr);

	return input_addr;
}

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	debugf2("SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		(unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}
/*
 * @input_addr is an InputAddr associated with the node represented by mci.
 * Translate @input_addr to a DramAddr and return the result.
 */
static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int node_id, intlv_shift;

	/*
	 * The start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) shows
	 * how to translate a DramAddr to an InputAddr. Here we reverse
	 * this procedure. When translating from a DramAddr to an InputAddr, the
	 * bits used for node interleaving are discarded. Here we recover these
	 * bits from the IntlvSel field of the DRAM Limit register (section
	 * 3.4.4.2) for the node that input_addr is associated with.
	 */
	node_id = pvt->mc_node_id;
	BUG_ON((node_id < 0) || (node_id > 7));

	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);

	if (intlv_shift == 0) {
		debugf1("    InputAddr 0x%lx translates to DramAddr of "
			"same value\n", (unsigned long)input_addr);

		return input_addr;
	}

	bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
	       (input_addr & 0xfff);

	intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
	dram_addr = bits + (intlv_sel << 12);

	debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
		"(%d node interleave bits)\n", (unsigned long)input_addr,
		(unsigned long)dram_addr, intlv_shift);

	return dram_addr;
}
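/*
 * Worked example: with intlv_shift = 1 and IntlvSel = 1, an InputAddr of
 * 0x5000 becomes bits = 0xa000, and dram_addr = 0xa000 + (1 << 12) = 0xb000;
 * the node-interleave bit discarded during the DramAddr-to-InputAddr
 * translation is thereby restored.
 */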
/*
 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
 * @dram_addr to a SysAddr.
 */
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((dram_addr >= hole_base) &&
		    (dram_addr < (hole_base + hole_size))) {
			sys_addr = dram_addr + hole_offset;

			debugf1("using DHAR to translate DramAddr 0x%lx to "
				"SysAddr 0x%lx\n", (unsigned long)dram_addr,
				(unsigned long)sys_addr);

			return sys_addr;
		}
	}

	amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
	sys_addr = dram_addr + base;

	/*
	 * The sys_addr we have computed up to this point is a 40-bit value
	 * because the k8 deals with 40-bit values. However, the value we are
	 * supposed to return is a full 64-bit physical address. The AMD
	 * x86-64 architecture specifies that the most significant implemented
	 * address bit through bit 63 of a physical address must be either all
	 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
	 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
	 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
	 * Programming.
	 */
	sys_addr |= ~((sys_addr & (1ull << 39)) - 1);

	debugf1("  Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
		pvt->mc_node_id, (unsigned long)dram_addr,
		(unsigned long)sys_addr);

	return sys_addr;
}
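/*
 * Sign-extension example: a 40-bit sys_addr of 0x8000000000 has bit 39 set,
 * so the OR above turns it into 0xffffff8000000000; an address with bit 39
 * clear is left unchanged.
 */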
/*
 * @input_addr is an InputAddr associated with the node given by mci. Translate
 * @input_addr to a SysAddr.
 */
static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
					 u64 input_addr)
{
	return dram_addr_to_sys_addr(mci,
				     input_addr_to_dram_addr(mci, input_addr));
}

/*
 * Find the minimum and maximum InputAddr values that map to the given @csrow.
 * Pass back these values in *input_addr_min and *input_addr_max.
 */
static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
			      u64 *input_addr_min, u64 *input_addr_max)
{
	struct amd64_pvt *pvt;

	BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));

	base = base_from_dct_base(pvt, csrow);
	mask = mask_from_dct_mask(pvt, csrow);

	*input_addr_min = base & ~mask;
	*input_addr_max = base | mask | pvt->dcs_mask_notused;
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    u32 *page, u32 *offset)
{
	*page = (u32) (error_address >> PAGE_SHIFT);
	*offset = ((u32) error_address) & ~PAGE_MASK;
}

/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * failure.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_printk(mci, KERN_ERR,
				"Failed to translate InputAddr to csrow for "
				"address 0x%lx\n", (unsigned long)sys_addr);

	return csrow;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

static u16 extract_syndrome(struct err_regs *err)
{
	return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
}
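/*
 * Illustrative example (hypothetical register values): with nbsh = 0x002d0000
 * the low syndrome byte is (0x002d0000 >> 15) & 0xff = 0x5a, and with
 * nbsl = 0x1c000000 the high byte is (0x1c000000 >> 16) & 0xff00 = 0x1c00,
 * giving a combined 16-bit syndrome of 0x1c5a.
 */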
/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * support ECC.
 */
static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
	int bit;
	enum edac_type edac_cap = EDAC_FLAG_NONE;

	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
		? 19
		: 17;

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}
static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);

static void amd64_dump_dramcfg_low(u32 dclr, int chan)
{
	debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
		(dclr & BIT(16)) ? "un" : "",
		(dclr & BIT(19)) ? "yes" : "no");

	debugf1(" PAR/ERR parity: %s\n",
		(dclr & BIT(8)) ? "enabled" : "disabled");

	debugf1(" DCT 128bit mode width: %s\n",
		(dclr & BIT(11)) ? "128b" : "64b");

	debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		(dclr & BIT(12)) ? "yes" : "no",
		(dclr & BIT(13)) ? "yes" : "no",
		(dclr & BIT(14)) ? "yes" : "no",
		(dclr & BIT(15)) ? "yes" : "no");
}

/* Display and decode various NB registers for debug purposes. */
static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
{
	debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	debugf1(" NB two channel DRAM capable: %s\n",
		(pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");

	debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
		(pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
		(pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");

	amd64_dump_dramcfg_low(pvt->dclr0, 0);

	debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
		"offset: 0x%08x\n",
		pvt->dhar,
		dhar_base(pvt->dhar),
		(boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
					   : f10_dhar_offset(pvt->dhar));

	debugf1(" DramHoleValid: %s\n",
		(pvt->dhar & DHAR_VALID) ? "yes" : "no");

	/* everything below this point is Fam10h and above */
	if (boot_cpu_data.x86 == 0xf) {
		amd64_debug_display_dimm_sizes(0, pvt);
		return;
	}

	amd64_printk(KERN_INFO, "using %s syndromes.\n",
		     ((pvt->syn_type == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		amd64_dump_dramcfg_low(pvt->dclr1, 1);

	/*
	 * Determine if ganged and then dump memory sizes for first controller,
	 * and if NOT ganged dump info for 2nd controller.
	 */
	ganged = dct_ganging_enabled(pvt);

	amd64_debug_display_dimm_sizes(0, pvt);

	if (!ganged)
		amd64_debug_display_dimm_sizes(1, pvt);
}
/* Read in both DBAM registers */
static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
{
	amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0);

	if (boot_cpu_data.x86 >= 0x10)
		amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1);
}
/*
 * NOTE: CPU Revision Dependent code: Rev E and Rev F
 *
 * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
 * set the shift factor for the DCSB and DCSM values.
 *
 * ->dcs_mask_notused, RevE:
 *
 * To find the max InputAddr for the csrow, start with the base address and set
 * all bits that are "don't care" bits in the test at the start of section
 *
 * The "don't care" bits are all set bits in the mask and all bits in the gaps
 * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
 * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
 * gaps.
 *
 * ->dcs_mask_notused, RevF and later:
 *
 * To find the max InputAddr for the csrow, start with the base address and set
 * all bits that are "don't care" bits in the test at the start of NPT section
 *
 * The "don't care" bits are all set bits in the mask and all bits in the gaps
 * between bit ranges [36:27] and [21:13].
 *
 * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
 * which are all bits in the above-mentioned gaps.
 */
static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
{
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->dcsb_base		= REV_E_DCSB_BASE_BITS;
		pvt->dcsm_mask		= REV_E_DCSM_MASK_BITS;
		pvt->dcs_mask_notused	= REV_E_DCS_NOTUSED_BITS;
		pvt->dcs_shift		= REV_E_DCS_SHIFT;
	} else {
		pvt->dcsb_base		= REV_F_F1Xh_DCSB_BASE_BITS;
		pvt->dcsm_mask		= REV_F_F1Xh_DCSM_MASK_BITS;
		pvt->dcs_mask_notused	= REV_F_F1Xh_DCS_NOTUSED_BITS;
		pvt->dcs_shift		= REV_F_F1Xh_DCS_SHIFT;
	}
}

/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
 */
static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs, reg;

	amd64_set_dct_base_and_mask(pvt);

	for (cs = 0; cs < pvt->cs_count; cs++) {
		reg = K8_DCSB0 + (cs * 4);
		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs]))
			debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
				cs, pvt->dcsb0[cs], reg);

		/* If DCT are NOT ganged, then read in DCT1's base */
		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
			reg = F10_DCSB1 + (cs * 4);
			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
						&pvt->dcsb1[cs]))
				debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
					cs, pvt->dcsb1[cs], reg);
		}
	}

	for (cs = 0; cs < pvt->num_dcsm; cs++) {
		reg = K8_DCSM0 + (cs * 4);
		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs]))
			debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
				cs, pvt->dcsm0[cs], reg);

		/* If DCT are NOT ganged, then read in DCT1's mask */
		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
			reg = F10_DCSM1 + (cs * 4);
			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
						&pvt->dcsm1[cs]))
				debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
					cs, pvt->dcsm1[cs], reg);
		}
	}
}

static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
{
	enum mem_type type;

	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
		if (pvt->dchr0 & DDR3_MODE)
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
		else
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
	} else
		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;

	debugf1(" Memory type is: %s\n", edac_mem_types[type]);

	return type;
}
/*
 * Read the DRAM Configuration Low register. It differs between CG, D & E revs
 * and the later RevF memory controllers (DDR vs DDR2).
 *
 * Return:
 *	number of memory channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag, err = 0;

	err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
	if (err)
		return err;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & F10_WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	return (flag) ? 2 : 1;
}
/* extract the ERROR ADDRESS for the K8 CPUs */
static u64 k8_get_error_address(struct mem_ctl_info *mci,
				struct err_regs *info)
{
	return (((u64) (info->nbeah & 0xff)) << 32) +
		(info->nbeal & ~0x03);
}
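/*
 * Example (hypothetical register values): nbeah = 0x000000ab and
 * nbeal = 0xcd000127 combine to the 40-bit error address
 * (0xab << 32) + (0xcd000127 & ~0x03) = 0xabcd000124; the low two bits are
 * always reported as zero.
 */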
/*
 * Read the Base and Limit registers for K8 based Memory controllers; extract
 * fields from the 'raw' reg into separate data fields.
 *
 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
 */
static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
{
	u32 off = dram << 3;	/* 8 bytes between DRAM entries */

	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low);

	/* Extract parts into separate data entries */
	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
	pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
	pvt->dram_rw_en[dram] = (low & 0x3);

	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low);

	/*
	 * Extract parts into separate data entries. Limit is the HIGHEST memory
	 * location of the region, so lower 24 bits need to be all ones.
	 */
	pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
	pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
	pvt->dram_DstNode[dram] = (low & 0x7);
}
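/*
 * Worked example (hypothetical register value): low = 0x00100003 gives
 * dram_base = (0x00100000 << 8) = 0x10000000 (256 MB), IntlvEn =
 * (0x00100003 >> 8) & 0x7 = 0, and rw_en = 0x3 (both reads and writes
 * enabled for the range).
 */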
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
				    struct err_regs *err_info, u64 sys_addr)
{
	struct mem_ctl_info *src_mci;

	syndrome = extract_syndrome(err_info);

	/* CHIPKILL enabled */
	if (err_info->nbcfg & K8_NBCFG_CHIPKILL) {
		channel = get_channel_from_ecc_syndrome(mci, syndrome);
		if (channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 */
			amd64_mc_printk(mci, KERN_WARNING,
					"unknown syndrome 0x%04x - possible "
					"error reporting race\n", syndrome);
			edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory. This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		channel = ((sys_addr & BIT(3)) != 0);
	}

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!src_mci) {
		amd64_mc_printk(mci, KERN_ERR,
				"failed to map error address 0x%lx to a node\n",
				(unsigned long)sys_addr);
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
		return;
	}

	/* Now map the sys_addr to a CSROW */
	csrow = sys_addr_to_csrow(src_mci, sys_addr);
	if (csrow < 0) {
		edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
	} else {
		error_address_to_page_and_offset(sys_addr, &page, &offset);

		edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
				  channel, EDAC_MOD_STR);
	}
}
static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
{
	int *dbam_map;

	if (pvt->ext_model >= K8_REV_F)
		dbam_map = ddr2_dbam;
	else if (pvt->ext_model >= K8_REV_D)
		dbam_map = ddr2_dbam_revD;
	else
		dbam_map = ddr2_dbam_revCG;

	return dbam_map[cs_mode];
}
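/*
 * Example using the visible table initializers above: on a RevF or later K8,
 * a DBAM field value of 0 indexes ddr2_dbam[0] = 128, i.e. a 128 MB chip
 * select; ddr3_dbam[0] = -1 marks an invalid/unused encoding for DDR3.
 */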
/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f10_early_channel_count(struct amd64_pvt *pvt)
{
	int dbams[] = { DBAM0, DBAM1 };
	int i, j, channels = 0;
	u32 dbam;

	/* If we are in 128 bit mode, then we are using 2 channels */
	if (pvt->dclr0 & F10_WIDTH_128) {
		channels = 2;
		return channels;
	}

	/*
	 * Need to check if in unganged mode: in that case, there are 2
	 * channels, but they are not in 128 bit mode and thus the above
	 * 'dclr0' status bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	debugf0("Data width is not 128 bits - need more decoding\n");
	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < ARRAY_SIZE(dbams); i++) {
		if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam))

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	debugf0("MCT channel count: %d\n", channels);

static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
{
	int *dbam_map;

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		dbam_map = ddr3_dbam;
	else
		dbam_map = ddr2_dbam;

	return dbam_map[cs_mode];
}
/* Enable extended configuration access via 0xCF8 feature */
static void amd64_setup(struct amd64_pvt *pvt)
{
	u32 reg;

	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);

	pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
	reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
	pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
}

/* Restore the extended configuration access via 0xCF8 feature */
static void amd64_teardown(struct amd64_pvt *pvt)
{
	u32 reg;

	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);

	reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
	if (pvt->flags.cf8_extcfg)
		reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
	pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
}
static u64 f10_get_error_address(struct mem_ctl_info *mci,
				 struct err_regs *info)
{
	return (((u64) (info->nbeah & 0xffff)) << 32) +
		(info->nbeal & ~0x01);
}

/*
 * Read the Base and Limit registers for F10 based Memory controllers. Extract
 * fields from the 'raw' reg into separate data fields.
 *
 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
 */
static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
{
	u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;

	low_offset = K8_DRAM_BASE_LOW + (dram << 3);
	high_offset = F10_DRAM_BASE_HIGH + (dram << 3);

	/* read the 'raw' DRAM BASE Address register */
	amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base);

	/* Read from the ECS data register */
	amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base);

	/* Extract parts into separate data entries */
	pvt->dram_rw_en[dram] = (low_base & 0x3);

	if (pvt->dram_rw_en[dram] == 0)
		return;

	pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;

	pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
			       (((u64)low_base & 0xFFFF0000) << 8);

	low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
	high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);

	/* read the 'raw' LIMIT registers */
	amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit);

	/* Read from the ECS data register for the HIGH portion */
	amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit);

	pvt->dram_DstNode[dram] = (low_limit & 0x7);
	pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;

	/*
	 * Extract address values and form a LIMIT address. Limit is the HIGHEST
	 * memory location of the region, so low 24 bits need to be all ones.
	 */
	pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
				(((u64) low_limit & 0xFFFF0000) << 8) |
				0x00FFFFFF;
}
static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
{
	if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
				&pvt->dram_ctl_select_low)) {
		debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
			"High range addresses at: 0x%x\n",
			pvt->dram_ctl_select_low,
			dct_sel_baseaddr(pvt));

		debugf0(" DCT mode: %s, All DCTs on: %s\n",
			(dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
			(dct_dram_enabled(pvt) ? "yes" : "no"));

		if (!dct_ganging_enabled(pvt))
			debugf0(" Address range split per DCT: %s\n",
				(dct_high_range_enabled(pvt) ? "yes" : "no"));

		debugf0(" DCT data interleave for ECC: %s, "
			"DRAM cleared since last warm reset: %s\n",
			(dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			(dct_memory_cleared(pvt) ? "yes" : "no"));

		debugf0(" DCT channel interleave: %s, "
			"DCT interleave bits selector: 0x%x\n",
			(dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			dct_sel_interleave_addr(pvt));
	}

	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
			   &pvt->dram_ctl_select_high);
}
/*
 * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				 int hi_range_sel, u32 intlv_en)
{
	u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		cs = 0;
	else if (hi_range_sel)
		cs = dct_sel_high;
	else if (dct_interleave_enabled(pvt)) {
		/*
		 * see F2x110[DctSelIntLvAddr] - channel interleave mode
		 */
		if (dct_sel_interleave_addr(pvt) == 0)
			cs = sys_addr >> 6 & 1;
		else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
			temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;

			if (dct_sel_interleave_addr(pvt) & 1)
				cs = (sys_addr >> 9 & 1) ^ temp;
			else
				cs = (sys_addr >> 6 & 1) ^ temp;
		} else if (intlv_en & 4)
			cs = sys_addr >> 15 & 1;
		else if (intlv_en & 2)
			cs = sys_addr >> 14 & 1;
		else if (intlv_en & 1)
			cs = sys_addr >> 13 & 1;
		else
			cs = sys_addr >> 12 & 1;
	} else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
		cs = ~dct_sel_high & 1;
	else
		cs = 0;

	return cs;
}

static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
{
	if (intlv_en == 1)
		return 1;
	else if (intlv_en == 3)
		return 2;
	else if (intlv_en == 7)
		return 3;

	return 0;
}
/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
					   u32 dct_sel_base_addr,
					   u64 dct_sel_base_off,
					   u32 hole_valid, u32 hole_off,
					   u64 dram_base)
{
	u64 chan_off;

	if (hi_range_sel) {
		if (!(dct_sel_base_addr & 0xFFFF0000) &&
		    hole_valid && (sys_addr >= 0x100000000ULL))
			chan_off = hole_off << 16;
		else
			chan_off = dct_sel_base_off;
	} else {
		if (hole_valid && (sys_addr >= 0x100000000ULL))
			chan_off = hole_off << 16;
		else
			chan_off = dram_base & 0xFFFFF8000000ULL;
	}

	return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
	       (chan_off & 0x0000FFFFFF800000ULL);
}
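/*
 * Worked example (hypothetical values): for sys_addr = 0x123456789 in the
 * hoisted region (>= 4 GB) with hole_valid set and hole_off = 0xff00, the
 * low-range branch picks chan_off = 0xff00 << 16 = 0xff000000, so the
 * returned channel address is 0x123456780 - 0xff000000 = 0x24456780.
 */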
/* Hack for the time being - Can we get this from BIOS?? */
#define CH0SPARE_RANK	0
#define CH1SPARE_RANK	1

/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 */
static inline int f10_process_possible_spare(int csrow,
					     u32 cs, struct amd64_pvt *pvt)
{
	u32 swap_done;
	u32 bad_dram_cs;

	/* Depending on channel, isolate respective SPARING info */
	if (cs) {
		swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
		if (swap_done && (csrow == bad_dram_cs))
			csrow = CH1SPARE_RANK;
	} else {
		swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
		if (swap_done && (csrow == bad_dram_cs))
			csrow = CH0SPARE_RANK;
	}

	return csrow;
}
/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL: NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u32 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = mci_lookup[nid];
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);

	for (csrow = 0; csrow < pvt->cs_count; csrow++) {

		cs_base = amd64_get_dct_base(pvt, cs, csrow);
		if (!(cs_base & K8_DCSB_CS_ENABLE))
			continue;

		/*
		 * We have an ENABLED CSROW, Isolate just the MASK bits of the
		 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
		 * of the actual address.
		 */
		cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;

		/*
		 * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
		 * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
		 */
		cs_mask = amd64_get_dct_mask(pvt, cs, csrow);

		debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
			csrow, cs_base, cs_mask);

		cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;

		debugf1(" Final CSMask=0x%x\n", cs_mask);
		debugf1(" (InputAddr & ~CSMask)=0x%x "
			"(CSBase & ~CSMask)=0x%x\n",
			(in_addr & ~cs_mask), (cs_base & ~cs_mask));

		if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
			cs_found = f10_process_possible_spare(csrow, cs, pvt);

			debugf1(" MATCH csrow=%d\n", cs_found);
			break;
		}
	}

	return cs_found;
}
/* For a given @dram_range, check if @sys_addr falls within it. */
static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
				  u64 sys_addr, int *nid, int *chan_sel)
{
	int node_id, cs_found = -EINVAL, high_range = 0;
	u32 intlv_en, intlv_sel, intlv_shift, hole_off;
	u32 hole_valid, tmp, dct_sel_base, channel;
	u64 dram_base, chan_addr, dct_sel_base_off;

	dram_base = pvt->dram_base[dram_range];
	intlv_en = pvt->dram_IntlvEn[dram_range];

	node_id = pvt->dram_DstNode[dram_range];
	intlv_sel = pvt->dram_IntlvSel[dram_range];

	debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
		dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);

	/*
	 * This assumes that one node's DHAR is the same as all the other
	 * nodes'.
	 */
	hole_off = (pvt->dhar & 0x0000FF80);
	hole_valid = (pvt->dhar & 0x1);
	dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;

	debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
		hole_off, hole_valid, intlv_sel);

	if (intlv_en &&
	    (intlv_sel != ((sys_addr >> 12) & intlv_en)))
		return -EINVAL;

	dct_sel_base = dct_sel_baseaddr(pvt);

	/*
	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	 */
	if (dct_high_range_enabled(pvt) &&
	    !dct_ganging_enabled(pvt) &&
	    ((sys_addr >> 27) >= (dct_sel_base >> 11)))
		high_range = 1;

	channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
					     dct_sel_base_off, hole_valid,
					     hole_off, dram_base);

	intlv_shift = f10_map_intlv_en_to_shift(intlv_en);

	/* remove Node ID (in case of memory interleaving) */
	tmp = chan_addr & 0xFC0;

	chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;

	/* remove channel interleave and hash */
	if (dct_interleave_enabled(pvt) &&
	    !dct_high_range_enabled(pvt) &&
	    !dct_ganging_enabled(pvt)) {
		if (dct_sel_interleave_addr(pvt) != 1)
			chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
		else {
			tmp = chan_addr & 0xFC0;
			chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
				    | tmp;
		}
	}

	debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
		chan_addr, (u32)(chan_addr >> 8));

	cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);

	if (cs_found >= 0) {
		*nid = node_id;
		*chan_sel = channel;
	}
	return cs_found;
}
static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
				       int *node, int *chan_sel)
{
	int dram_range, cs_found = -EINVAL;
	u64 dram_base, dram_limit;

	for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {

		if (!pvt->dram_rw_en[dram_range])
			continue;

		dram_base = pvt->dram_base[dram_range];
		dram_limit = pvt->dram_limit[dram_range];

		if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {

			cs_found = f10_match_to_this_node(pvt, dram_range,
							  sys_addr, node,
							  chan_sel);
			if (cs_found >= 0)
				break;
		}
	}
	return cs_found;
}
/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware.
 */
static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
				     struct err_regs *err_info,
				     u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 page, offset;
	int nid, csrow, chan = 0;
	u16 syndrome;

	csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);

	if (csrow < 0) {
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
		return;
	}

	error_address_to_page_and_offset(sys_addr, &page, &offset);

	syndrome = extract_syndrome(err_info);

	/*
	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
	if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
		chan = get_channel_from_ecc_syndrome(mci, syndrome);

	if (chan >= 0) {
		edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
				  EDAC_MOD_STR);
	} else {
		/*
		 * Channel unknown, report all channels on this CSROW as failed.
		 */
		for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
			edac_mc_handle_ce(mci, page, offset, syndrome,
					  csrow, chan, EDAC_MOD_STR);
	}
}
/*
 * debug routine to display the memory sizes of all logical DIMMs and its
 * CSROWs as well
 */
static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
{
	int dimm, size0, size1, factor = 0;
	u32 dbam;
	u32 *dcsb;

	if (boot_cpu_data.x86 == 0xf) {
		if (pvt->dclr0 & F10_WIDTH_128)
			factor = 1;

		/* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)
			return;
	}

	debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
		ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);

	dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
	dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;

	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

	/* Dump memory sizes for DIMM and its CSROWs */
	for (dimm = 0; dimm < 4; dimm++) {

		size0 = 0;
		if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
			size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));

		size1 = 0;
		if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
			size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));

		edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n",
			    dimm * 2, size0 << factor,
			    dimm * 2 + 1, size1 << factor);
	}
}
static struct amd64_family_type amd64_family_types[] = {
	[K8_CPUS] = {
		.addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
		.misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
		.ops = {
			.early_channel_count = k8_early_channel_count,
			.get_error_address = k8_get_error_address,
			.read_dram_base_limit = k8_read_dram_base_limit,
			.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
			.dbam_to_cs = k8_dbam_to_chip_select,
		}
	},
	[F10_CPUS] = {
		.addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
		.misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
		.ops = {
			.early_channel_count = f10_early_channel_count,
			.get_error_address = f10_get_error_address,
			.read_dram_base_limit = f10_read_dram_base_limit,
			.read_dram_ctl_register = f10_read_dram_ctl_register,
			.map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
			.dbam_to_cs = f10_dbam_to_chip_select,
		}
	},
};
static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	dev = pci_get_device(vendor, device, dev);
	while (dev) {
		if ((dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
		dev = pci_get_device(vendor, device, dev);
	}

	return dev;
}

/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
static u16 x4_vectors[] = {
	0x2f57, 0x1afe, 0x66cc, 0xdd88,
	0x11eb, 0x3396, 0x7f4c, 0xeac8,
	0x0001, 0x0002, 0x0004, 0x0008,
	0x1013, 0x3032, 0x4044, 0x8088,
	0x106b, 0x30d6, 0x70fc, 0xe0a8,
	0x4857, 0xc4fe, 0x13cc, 0x3288,
	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
	0x15c1, 0x2a42, 0x89ac, 0x4758,
	0x2b03, 0x1602, 0x4f0c, 0xca08,
	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
	0x8ba7, 0x465e, 0x244c, 0x1cc8,
	0x2b87, 0x164e, 0x642c, 0xdc18,
	0x40b9, 0x80de, 0x1094, 0x20e8,
	0x27db, 0x1eb6, 0x9dac, 0x7b58,
	0x11c1, 0x2242, 0x84ac, 0x4c58,
	0x1be5, 0x2d7a, 0x5e34, 0xa718,
	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
	0x4c97, 0xc87e, 0x11fc, 0x33a8,
	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
	0x16b3, 0x3d62, 0x4f34, 0x8518,
	0x1e2f, 0x391a, 0x5cac, 0xf858,
	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
	0x4397, 0xc27e, 0x17fc, 0x3ea8,
	0x1617, 0x3d3e, 0x6464, 0xb8b8,
	0x23ff, 0x12aa, 0xab6c, 0x56d8,
	0x2dfb, 0x1ba6, 0x913c, 0x7328,
	0x185d, 0x2ca6, 0x7914, 0x9e28,
	0x171b, 0x3e36, 0x7d7c, 0xebe8,
	0x4199, 0x82ee, 0x19f4, 0x2e58,
	0x4807, 0xc40e, 0x130c, 0x3208,
	0x1905, 0x2e0a, 0x5804, 0xac08,
	0x213f, 0x132a, 0xadfc, 0x5ba8,
	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};

static u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
			   int v_dim)
{
	unsigned int i, err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		u16 s = syndrome;
		int v_idx = err_sym * v_dim;
		int v_end = (err_sym + 1) * v_dim;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and bit set in the modified syndrome, */
				if (s & i) {
					/* remove it. */
					s ^= ev_comp;

					if (!s)
						return err_sym;
				}

			} else if (s & i)
				/* can't get to zero, move to next symbol */
				break;
		}
	}

	debugf0("syndrome(%x) not found\n", syndrome);
	return -1;
}
static int map_err_sym_to_channel(int err_sym, int sym_size)
{
	if (sym_size == 4)
		switch (err_sym) {
		default:
			return err_sym >> 4;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
			     err_sym);
			return -1;
		default:
			return err_sym >> 3;
		}
}
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int err_sym = -1;

	if (pvt->syn_type == 8)
		err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
					  pvt->syn_type);
	else if (pvt->syn_type == 4)
		err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
					  pvt->syn_type);
	else {
		amd64_printk(KERN_WARNING, "%s: Illegal syndrome type: %u\n",
			     __func__, pvt->syn_type);
		return err_sym;
	}

	return map_err_sym_to_channel(err_sym, pvt->syn_type);
}
/*
 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
 * ADDRESS and process.
 */
static void amd64_handle_ce(struct mem_ctl_info *mci,
			    struct err_regs *info)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 sys_addr;

	/* Ensure that the Error Address is VALID */
	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
		amd64_mc_printk(mci, KERN_ERR,
				"HW has no ERROR_ADDRESS available\n");
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
		return;
	}

	sys_addr = pvt->ops->get_error_address(mci, info);

	amd64_mc_printk(mci, KERN_ERR,
			"CE ERROR_ADDRESS= 0x%llx\n", sys_addr);

	pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
}
/* Handle any Un-correctable Errors (UEs) */
static void amd64_handle_ue(struct mem_ctl_info *mci,
			    struct err_regs *info)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	struct mem_ctl_info *log_mci, *src_mci = NULL;
	int csrow;
	u64 sys_addr;
	u32 page, offset;

	log_mci = mci;

	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
		amd64_mc_printk(mci, KERN_CRIT,
				"HW has no ERROR_ADDRESS available\n");
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
		return;
	}

	sys_addr = pvt->ops->get_error_address(mci, info);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!src_mci) {
		amd64_mc_printk(mci, KERN_CRIT,
				"ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
				(unsigned long)sys_addr);
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
		return;
	}

	log_mci = src_mci;

	csrow = sys_addr_to_csrow(log_mci, sys_addr);
	if (csrow < 0) {
		amd64_mc_printk(mci, KERN_CRIT,
				"ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
				(unsigned long)sys_addr);
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
	} else {
		error_address_to_page_and_offset(sys_addr, &page, &offset);
		edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
	}
}
static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
					    struct err_regs *info)
{
	u32 ec = ERROR_CODE(info->nbsl);
	u32 xec = EXT_ERROR_CODE(info->nbsl);
	int ecc_type = (info->nbsh >> 13) & 0x3;

	/* Bail out early if this was an 'observed' error */
	if (PP(ec) == K8_NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	if (ecc_type == 2)
		amd64_handle_ce(mci, info);
	else if (ecc_type == 1)
		amd64_handle_ue(mci, info);
}
void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
{
	struct mem_ctl_info *mci = mci_lookup[node_id];
	struct err_regs regs;

	regs.nbsl  = (u32) m->status;
	regs.nbsh  = (u32)(m->status >> 32);
	regs.nbeal = (u32) m->addr;
	regs.nbeah = (u32)(m->addr >> 32);
	regs.nbcfg = nbcfg;

	__amd64_decode_bus_error(mci, &regs);

	/*
	 * Check the UE bit of the NB status high register, if set generate some
	 * logs. If NOT a GART error, then process the event as a NO-INFO event.
	 * If it was a GART error, skip that process.
	 *
	 * FIXME: this should go somewhere else, if at all.
	 */
	if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
		edac_mc_handle_ue_no_info(mci, "UE bit is set");
}
/*
 * Input:
 *	1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer
 *	2) AMD Family index value
 *
 * Output:
 *	Upon return of 0, the following filled in:
 *
 *		struct pvt->addr_f1_ctl
 *		struct pvt->misc_f3_ctl
 *
 *	Filled in with related device functions of 'dram_f2_ctl'
 *	These devices are "reserved" via the pci_get_device()
 *
 *	Upon return of 1 (error status):
 */
static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx)
{
	const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx];

	/* Reserve the ADDRESS MAP Device */
	pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
						    amd64_dev->addr_f1_ctl,
						    pvt->dram_f2_ctl);

	if (!pvt->addr_f1_ctl) {
		amd64_printk(KERN_ERR, "error address map device not found: "
			     "vendor %x device 0x%x (broken BIOS?)\n",
			     PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl);
		return 1;
	}

	/* Reserve the MISC Device */
	pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
						    amd64_dev->misc_f3_ctl,
						    pvt->dram_f2_ctl);

	if (!pvt->misc_f3_ctl) {
		pci_dev_put(pvt->addr_f1_ctl);
		pvt->addr_f1_ctl = NULL;

		amd64_printk(KERN_ERR, "error miscellaneous device not found: "
			     "vendor %x device 0x%x (broken BIOS?)\n",
			     PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl);
		return 1;
	}

	debugf1(" Addr Map device PCI Bus ID:\t%s\n",
		pci_name(pvt->addr_f1_ctl));
	debugf1(" DRAM MEM-CTL PCI Bus ID:\t%s\n",
		pci_name(pvt->dram_f2_ctl));
	debugf1(" Misc device PCI Bus ID:\t%s\n",
		pci_name(pvt->misc_f3_ctl));

	return 0;
}

static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
{
	pci_dev_put(pvt->addr_f1_ctl);
	pci_dev_put(pvt->misc_f3_ctl);
}
/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void amd64_read_mc_registers(struct amd64_pvt *pvt)
{
	u64 msr_val;
	u32 tmp;
	int dram;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	debugf0("  TOP_MEM:  0x%016llx\n", pvt->top_mem);

	/* check first whether TOP_MEM2 is enabled */
	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & (1U << 21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		debugf0("  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else
		debugf0("  TOP_MEM2 disabled.\n");

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);

	if (pvt->ops->read_dram_ctl_register)
		pvt->ops->read_dram_ctl_register(pvt);

	for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
		/*
		 * Call CPU specific READ function to get the DRAM Base and
		 * Limit values from the DCT.
		 */
		pvt->ops->read_dram_base_limit(pvt, dram);

		/*
		 * Only print out debug info on rows with both R and W Enabled.
		 * Normal processing, compiler should optimize this whole 'if'
		 * debug output block away.
		 */
		if (pvt->dram_rw_en[dram] != 0) {
			debugf1("  DRAM-BASE[%d]: 0x%016llx "
				"DRAM-LIMIT:  0x%016llx\n",
				dram,
				pvt->dram_base[dram],
				pvt->dram_limit[dram]);

			debugf1("        IntlvEn=%s %s %s "
				"IntlvSel=%d DstNode=%d\n",
				pvt->dram_IntlvEn[dram] ?
					"Enabled" : "Disabled",
				(pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
				(pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
				pvt->dram_IntlvSel[dram],
				pvt->dram_DstNode[dram]);
		}
	}

	amd64_read_dct_base_mask(pvt);

	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
	amd64_read_dbam_reg(pvt);

	amd64_read_pci_cfg(pvt->misc_f3_ctl,
			   F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);

	if (boot_cpu_data.x86 >= 0x10) {
		if (!dct_ganging_enabled(pvt)) {
			amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
			amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1);
		}
		amd64_read_pci_cfg(pvt->misc_f3_ctl, EXT_NB_MCA_CFG, &tmp);
	}
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model > 7 &&
	    /* F3x180[EccSymbolSize]=1 => x8 symbols */
	    tmp & BIT(25))
		pvt->syn_type = 8;
	else
		pvt->syn_type = 4;

	amd64_dump_misc_regs(pvt);
}
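/*
 * A worked example of the two MSRs read above (values made up for
 * illustration): on a node with 3GB of DRAM below the 4GB MMIO hole and
 * another 2GB remapped above it,
 *
 *	TOP_MEM  = 0x00000000c0000000	DRAM valid below this address
 *	TOP_MEM2 = 0x0000000180000000	DRAM valid in [4GB, TOP_MEM2)
 *
 * with SYSCFG[21] (the TOM2 enable checked above) set. If no DRAM is
 * remapped above 4GB, SYSCFG[21] stays clear and TOP_MEM2 is not read.
 */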
/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			node_id
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of 4 sets of 4 bits each; the bit fields map
 * to CSROWs as follows:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from: 0 to 15
 * The meaning of the values depends on CPU revision and dual-channel state,
 * see the relevant BKDG for more info.
 *
 * The memory controller provides for a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on CPU
 * revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages on the specified CSROW number it
 *	encompasses
 */
static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
{
	u32 cs_mode, nr_pages;

	/*
	 * The math on this doesn't look right on the surface because x/2*4 can
	 * be simplified to x*2 but this expression makes use of the fact that
	 * it is integral math where 1/2=0. This intermediate value becomes the
	 * number of bits to shift the DBAM register to extract the proper
	 * CSROW field.
	 */
	cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;

	nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);

	/*
	 * If dual channel, then double the memory size of the single channel.
	 * Channel count is 1 or 2.
	 */
	nr_pages <<= (pvt->channel_count - 1);

	debugf0("  (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
	debugf0("    nr_pages= %u  channel-count = %d\n",
		nr_pages, pvt->channel_count);

	return nr_pages;
}
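/*
 * Worked example of the shift math above (illustration only): for
 * csrow_nr = 5, (5/2)*4 = 8 in integer math, so bits 8-11 of DBAM are
 * extracted -- exactly the field the table above assigns to CSROWs 4
 * and 5. A field value of e.g. 0x9 is then translated by the
 * revision-specific dbam_to_cs() tables into a chip-select size in MB,
 * and the '<< (20 - PAGE_SHIFT)' converts MB into PAGE_SIZE pages.
 */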
/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int amd64_init_csrows(struct mem_ctl_info *mci)
{
	struct csrow_info *csrow;
	struct amd64_pvt *pvt;
	u64 input_addr_min, input_addr_max, sys_addr;
	int i, empty = 1;

	pvt = mci->pvt_info;

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);

	debugf0("NBCFG= 0x%x  CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
		(pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
		(pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
		);

	for (i = 0; i < pvt->cs_count; i++) {
		csrow = &mci->csrows[i];

		if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
			debugf1("----CSROW %d EMPTY for node %d\n", i,
				pvt->mc_node_id);
			continue;
		}

		debugf1("----CSROW %d VALID for MC node %d\n",
			i, pvt->mc_node_id);

		empty = 0;
		csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
		find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
		sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
		csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
		sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
		csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
		csrow->page_mask = ~mask_from_dct_mask(pvt, i);
		/* 8 bytes of resolution */

		csrow->mtype = amd64_determine_memory_type(pvt);

		debugf1("  for MC node %d csrow %d:\n", pvt->mc_node_id, i);
		debugf1("    input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
			(unsigned long)input_addr_min,
			(unsigned long)input_addr_max);
		debugf1("    sys_addr: 0x%lx  page_mask: 0x%lx\n",
			(unsigned long)sys_addr, csrow->page_mask);
		debugf1("    nr_pages: %u  first_page: 0x%lx "
			"last_page: 0x%lx\n",
			(unsigned)csrow->nr_pages,
			csrow->first_page, csrow->last_page);

		/*
		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
		 */
		if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
			csrow->edac_mode =
				(pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
				EDAC_S4ECD4ED : EDAC_SECDED;
		else
			csrow->edac_mode = EDAC_NONE;
	}

	return empty;
}
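/*
 * Example of the edac_mode decision above (illustration only): with
 * NBCFG[22] (DRAM ECC enable) and NBCFG[23] (chipkill) both set by the
 * BIOS, every populated csrow reports EDAC_S4ECD4ED (single x4 symbol
 * correction, double symbol detection); with only NBCFG[22] set, plain
 * 64/8 SECDED is reported; with neither, EDAC_NONE.
 */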
/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (amd_get_nb_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}
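/*
 * Example (illustration, assuming a linear CPU enumeration): on a
 * two-socket system with quad-core parts, amd_get_nb_id() maps cpus 0-3
 * to northbridge/node 0 and cpus 4-7 to northbridge/node 1, so calling
 * this with nid=1 sets exactly cpus 4-7 in *mask.
 */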
/* check MCG_CTL on all the cpus on this node */
static bool amd64_nb_mce_bank_enabled_on_node(int nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
			     __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & K8_MSR_MCGCTL_NBE;

		debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			cpu, reg->q,
			(nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}
static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
			     __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {

		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & K8_MSR_MCGCTL_NBE)
				pvt->flags.nb_mce_enable = 1;

			reg->l |= K8_MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off before
			 */
			if (!pvt->flags.nb_mce_enable)
				reg->l &= ~K8_MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}
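/*
 * The function above follows the usual batched read-modify-write
 * pattern for per-core MSRs (sketch; SOME_BIT is hypothetical, standing
 * in for whatever MSR bit is of interest):
 *
 *	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);	// gather
 *	for_each_cpu(cpu, mask)
 *		per_cpu_ptr(msrs, cpu)->l |= SOME_BIT;	// modify locally
 *	wrmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);	// scatter
 *
 * so the cross-CPU traffic is batched into one gather and one scatter
 * call instead of a rdmsr/wrmsr round trip per core.
 */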
static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);

	/* turn on UECCEn and CECCEn bits */
	pvt->old_nbctl = value & mask;
	pvt->nbctl_mcgctl_saved = 1;

	value |= mask;
	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);

	if (amd64_toggle_ecc_err_reporting(pvt, ON))
		amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
					   "MCGCTL!\n");

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);

	debugf0("NBCFG(1)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");

	if (!(value & K8_NBCFG_ECC_ENABLE)) {
		amd64_printk(KERN_WARNING,
			"This node reports that DRAM ECC is "
			"currently Disabled; ENABLING now\n");

		pvt->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= K8_NBCFG_ECC_ENABLE;
		pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);

		amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);

		if (!(value & K8_NBCFG_ECC_ENABLE)) {
			amd64_printk(KERN_WARNING,
				"Hardware rejects Enabling DRAM ECC checking\n"
				"Check memory DIMM configuration\n");
		} else {
			amd64_printk(KERN_DEBUG,
				"Hardware accepted DRAM ECC Enable\n");
		}
	} else
		pvt->flags.nb_ecc_prev = 1;

	debugf0("NBCFG(2)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");

	pvt->ctl_error_info.nbcfg = value;
}
static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
{
	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

	if (!pvt->nbctl_mcgctl_saved)
		return;

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
	value &= ~mask;
	value |= pvt->old_nbctl;

	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting which we force-enabled */
	if (!pvt->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
		value &= ~K8_NBCFG_ECC_ENABLE;
		pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (amd64_toggle_ecc_err_reporting(pvt, OFF))
		amd64_printk(KERN_WARNING, "Error restoring NB MCGCTL settings!\n");
}
/*
 * EDAC requires that the BIOS have ECC enabled before taking over the
 * processing of ECC errors. This is because only the BIOS can properly
 * initialize the memory system completely. A command line option allows
 * forcing hardware ECC on later, in amd64_enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";
static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
{
	u32 value;
	u8 ecc_enabled = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);

	ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
	if (!ecc_enabled)
		amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
			     "is currently disabled, set F3x%x[22] (%s).\n",
			     K8_NBCFG, pci_name(pvt->misc_f3_ctl));
	else
		amd64_printk(KERN_INFO, "ECC is enabled by BIOS.\n");

	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
	if (!nb_mce_en)
		amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
			     "0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, pvt->mc_node_id);

	if (!ecc_enabled || !nb_mce_en) {
		if (!ecc_enable_override) {
			amd64_printk(KERN_NOTICE, "%s", ecc_msg);
			return -ENODEV;
		} else
			amd64_printk(KERN_WARNING, "Forcing ECC checking on!\n");
	}

	return 0;
}
struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
					  ARRAY_SIZE(amd64_inj_attrs) +
					  1];

struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };

static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
{
	unsigned int i = 0, j = 0;

	for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
		sysfs_attrs[i] = amd64_dbg_attrs[i];

	for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
		sysfs_attrs[i] = amd64_inj_attrs[j];

	sysfs_attrs[i] = terminator;

	mci->mc_driver_sysfs_attributes = sysfs_attrs;
}
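/*
 * Layout sketch of the merged table (illustration): with two debug
 * attributes and one injection attribute,
 *
 *	sysfs_attrs = { dbg[0], dbg[1], inj[0], { .attr.name = NULL } };
 *
 * The EDAC core walks the array until it hits the NULL-named
 * terminator, which is why sysfs_attrs[] is sized one larger than the
 * sum of its two source tables.
 */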
static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & K8_NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & K8_NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= amd64_determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= pvt->ctl_name;
	mci->dev_name		= pci_name(pvt->dram_f2_ctl);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}
/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
{
	u8 fam = boot_cpu_data.x86;
	struct amd64_family_type *fam_type = NULL;

	switch (fam) {
	case 0xf:
		fam_type = &amd64_family_types[K8_CPUS];
		pvt->ctl_name = fam_type->ctl_name;
		pvt->min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
		break;
	case 0x10:
		fam_type = &amd64_family_types[F10_CPUS];
		pvt->ctl_name = fam_type->ctl_name;
		pvt->min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
		break;
	default:
		amd64_printk(KERN_ERR, "Unsupported family!\n");
		return NULL;
	}

	amd64_printk(KERN_INFO, "%s %s detected.\n", pvt->ctl_name,
		     (fam == 0xf ?
		      (pvt->ext_model >= K8_REV_F ? "revF or later"
						  : "revE or earlier")
		      : ""));

	return fam_type;
}
/*
 * Init stuff for this DRAM Controller device.
 *
 * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration
 * Space feature MUST be enabled on ALL Processors prior to actually reading
 * from the ECS registers. Since the loading of the module can occur on any
 * 'core', and cores don't 'see' all the other processors' ECS data when the
 * others are NOT enabled, our solution is to first enable ECS access in this
 * routine on all processors, gather some data in an amd64_pvt structure and
 * later come back in a finish-setup function to perform that final
 * initialization. See also amd64_init_2nd_stage() for that.
 */
static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
				    int mc_type_index)
{
	struct amd64_pvt *pvt = NULL;
	struct amd64_family_type *fam_type = NULL;
	int err = 0, ret;

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_exit;

	pvt->mc_node_id = get_node_id(dram_f2_ctl);

	pvt->dram_f2_ctl	= dram_f2_ctl;
	pvt->ext_model		= boot_cpu_data.x86_model >> 4;
	pvt->mc_type_index	= mc_type_index;
	pvt->ops		= family_ops(mc_type_index);

	ret = -EINVAL;
	fam_type = amd64_per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	/*
	 * We have the dram_f2_ctl device as an argument, now go reserve its
	 * sibling devices from the PCI system.
	 */
	ret = -ENODEV;
	err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index);
	if (err)
		goto err_free;

	ret = -EINVAL;
	err = amd64_check_ecc_enabled(pvt);
	if (err)
		goto err_put;

	/*
	 * Key operation here: setup of HW prior to performing ops on it. Some
	 * setup is required to access ECS data. After this is performed, the
	 * 'teardown' function must be called upon error and normal exit paths.
	 */
	if (boot_cpu_data.x86 >= 0x10)
		amd64_setup(pvt);

	/*
	 * Save the pointer to the private data for use in 2nd initialization
	 * stage
	 */
	pvt_lookup[pvt->mc_node_id] = pvt;

	return 0;

err_put:
	amd64_free_mc_sibling_devices(pvt);

err_free:
	kfree(pvt);

err_exit:
	return ret;
}
/*
 * This is the finishing stage of the init code. Needs to be performed after all
 * MCs' hardware have been prepped for accessing extended config space.
 */
static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
{
	int node_id = pvt->mc_node_id;
	struct mem_ctl_info *mci;
	int ret = -ENODEV;

	amd64_read_mc_registers(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure
	 */
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_exit;

	ret = -ENOMEM;
	mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
	if (!mci)
		goto err_exit;

	mci->pvt_info = pvt;

	mci->dev = &pvt->dram_f2_ctl->dev;
	amd64_setup_mci_misc_attributes(mci);

	if (amd64_init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	amd64_enable_ecc_error_reporting(mci);
	amd64_set_mc_sysfs_attributes(mci);

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		debugf1("failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	mci_lookup[node_id] = mci;
	pvt_lookup[node_id] = NULL;

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(amd64_decode_bus_error);

	return 0;

err_add_mc:
	edac_mc_free(mci);

err_exit:
	debugf0("failure to init 2nd stage: ret=%d\n", ret);

	amd64_restore_ecc_error_reporting(pvt);

	if (boot_cpu_data.x86 > 0xf)
		amd64_teardown(pvt);

	amd64_free_mc_sibling_devices(pvt);

	kfree(pvt_lookup[pvt->mc_node_id]);
	pvt_lookup[node_id] = NULL;

	return ret;
}
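/*
 * Sizing note for the edac_mc_alloc() call above (illustration): a
 * dual-channel node exposing 8 chip-selects ends up as
 *
 *	edac_mc_alloc(0, 8, 2, node_id);
 *
 * i.e. zero bytes of edac-core-managed private storage (the driver
 * keeps its own kzalloc'd amd64_pvt and hooks it up via mci->pvt_info),
 * 8 csrow instances with 2 channels each, and node_id doubling as the
 * MC index.
 */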
static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
					     const struct pci_device_id *mc_type)
{
	int ret = 0;

	debugf0("(MC node=%d)\n", get_node_id(pdev));

	ret = pci_enable_device(pdev);
	if (ret < 0)
		ret = -EIO;
	else
		ret = amd64_probe_one_instance(pdev, mc_type->driver_data);

	if (ret < 0)
		debugf0("ret=%d\n", ret);

	return ret;
}
static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	amd64_restore_ecc_error_reporting(pvt);

	if (boot_cpu_data.x86 > 0xf)
		amd64_teardown(pvt);

	amd64_free_mc_sibling_devices(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;
	mci_lookup[pvt->mc_node_id] = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}
/*
 * This table is part of the interface for loading drivers for PCI devices. The
 * PCI core identifies what devices are on a system during boot, and then
 * queries this table to see if this driver is for a given device found.
 */
static const struct pci_device_id amd64_pci_table[] __devinitdata = {
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
		.driver_data	= K8_CPUS
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
		.driver_data	= F10_CPUS
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
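/*
 * Matching sketch (illustration): the K8 entry above produces a
 * modalias pattern along the lines of
 *
 *	pci:v00001022d00001102sv*sd*bc*sc*i*
 *
 * so udev/modprobe can auto-load this module when the F2 "DRAM
 * controller" function of a K8 northbridge is discovered.
 */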
static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= amd64_init_one_instance,
	.remove		= __devexit_p(amd64_remove_one_instance),
	.id_table	= amd64_pci_table,
};
static void amd64_setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (amd64_ctl_pci)
		return;

	mci = mci_lookup[0];
	if (mci) {

		pvt = mci->pvt_info;
		amd64_ctl_pci =
			edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev,
						    EDAC_MOD_STR);

		if (!amd64_ctl_pci) {
			pr_warning("%s(): Unable to create PCI control\n",
				   __func__);

			pr_warning("%s(): PCI error report via EDAC not set\n",
				   __func__);
		}
	}
}
static int __init amd64_edac_init(void)
{
	int nb, err = -ENODEV;
	bool load_ok = false;

	edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");

	opstate_init();

	if (amd_cache_northbridges() < 0)
		goto err_ret;

	err = -ENOMEM;
	msrs = msrs_alloc();
	if (!msrs)
		goto err_ret;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		goto err_pci;

	/*
	 * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
	 * amd64_pvt structs. These will be used in the 2nd stage init function
	 * to finish initialization of the MC instances.
	 */
	err = -ENODEV;
	for (nb = 0; nb < amd_nb_num(); nb++) {
		if (!pvt_lookup[nb])
			continue;

		err = amd64_init_2nd_stage(pvt_lookup[nb]);
		if (err)
			goto err_2nd_stage;

		load_ok = true;
	}

	if (load_ok) {
		amd64_setup_pci_device();
		return 0;
	}

err_2nd_stage:
	pci_unregister_driver(&amd64_pci_driver);

err_pci:
	msrs_free(msrs);
	msrs = NULL;

err_ret:
	return err;
}
static void __exit amd64_edac_exit(void)
{
	if (amd64_ctl_pci)
		edac_pci_release_generic_ctl(amd64_ctl_pci);

	pci_unregister_driver(&amd64_pci_driver);

	msrs_free(msrs);
	msrs = NULL;
}
module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");