amd64_edac: Fix DCT argument type
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 495b4d5..cc4f887 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -24,51 +24,6 @@ static atomic_t drv_instances = ATOMIC_INIT(0);
 static struct mem_ctl_info **mcis;
 static struct ecc_settings **ecc_stngs;
 
-/*
- * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
- * later.
- */
-static int ddr2_dbam_revCG[] = {
-                          [0]          = 32,
-                          [1]          = 64,
-                          [2]          = 128,
-                          [3]          = 256,
-                          [4]          = 512,
-                          [5]          = 1024,
-                          [6]          = 2048,
-};
-
-static int ddr2_dbam_revD[] = {
-                          [0]          = 32,
-                          [1]          = 64,
-                          [2 ... 3]    = 128,
-                          [4]          = 256,
-                          [5]          = 512,
-                          [6]          = 256,
-                          [7]          = 512,
-                          [8 ... 9]    = 1024,
-                          [10]         = 2048,
-};
-
-static int ddr2_dbam[] = { [0]         = 128,
-                          [1]          = 256,
-                          [2 ... 4]    = 512,
-                          [5 ... 6]    = 1024,
-                          [7 ... 8]    = 2048,
-                          [9 ... 10]   = 4096,
-                          [11]         = 8192,
-};
-
-static int ddr3_dbam[] = { [0]         = -1,
-                          [1]          = 256,
-                          [2]          = 512,
-                          [3 ... 4]    = -1,
-                          [5 ... 6]    = 1024,
-                          [7 ... 8]    = 2048,
-                          [9 ... 10]   = 4096,
-                          [11]         = 8192,
-};
-
 /*
  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
  * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
@@ -76,8 +31,6 @@ static int ddr3_dbam[] = { [0]         = -1,
  *
  *FIXME: Produce a better mapping/linearisation.
  */
-
-
 struct scrubrate {
        u32 scrubval;           /* bit pattern for scrub rate */
        u32 bandwidth;          /* bandwidth consumed (bytes/sec) */
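
A note on how this table is consumed: __amd64_set_scrub_rate() (next hunk) walks scrubrates[] until it hits the first entry whose bandwidth does not exceed the requested one, skipping bit patterns below the min_rate floor. A minimal user-space sketch of that lookup, using an abridged stand-in table rather than the driver's full scrubrates[]:

    #include <stdio.h>

    static const struct scrubrate {
            unsigned scrubval;       /* bit pattern for scrub rate */
            unsigned long bandwidth; /* bandwidth consumed (bytes/sec) */
    } rates[] = {
            { 0x01, 1600000000UL },
            { 0x02,  800000000UL },
            { 0x05,  100000000UL },
            { 0x06,   50000000UL },
            { 0x00,          0UL },  /* scrubbing off, catch-all */
    };

    static unsigned pick_scrubval(unsigned long new_bw, unsigned min_rate)
    {
            unsigned i;

            /* first entry at or below the requested bandwidth wins;
             * entries below min_rate are skipped as not recommended */
            for (i = 0; i < sizeof(rates)/sizeof(rates[0]) - 1; i++) {
                    if (rates[i].scrubval < min_rate)
                            continue;
                    if (rates[i].bandwidth <= new_bw)
                            break;
            }
            return rates[i].scrubval;
    }

    int main(void)
    {
            printf("0x%02x\n", pick_scrubval(90000000UL, 0x5)); /* 0x06 */
            return 0;
    }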
@@ -229,7 +182,7 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
 
        scrubval = scrubrates[i].scrubval;
 
-       pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
+       pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
 
        if (scrubval)
                return scrubrates[i].bandwidth;
@@ -240,8 +193,12 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
 static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
 {
        struct amd64_pvt *pvt = mci->pvt_info;
+       u32 min_scrubrate = 0x5;
 
-       return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
+       if (boot_cpu_data.x86 == 0xf)
+               min_scrubrate = 0x0;
+
+       return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
 }
 
 static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
@@ -250,7 +207,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
        u32 scrubval = 0;
        int i, retval = -EINVAL;
 
-       amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);
+       amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
 
        scrubval = scrubval & 0x001F;
 
@@ -269,7 +226,8 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
  * returns true if the SysAddr given by sys_addr matches the
  * DRAM base/limit associated with node_id
  */
-static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, int nid)
+static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
+                                  unsigned nid)
 {
        u64 addr;
 
@@ -295,7 +253,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
                                                u64 sys_addr)
 {
        struct amd64_pvt *pvt;
-       int node_id;
+       unsigned node_id;
        u32 intlv_en, bits;
 
        /*
@@ -345,7 +303,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
        }
 
 found:
-       return edac_mc_find(node_id);
+       return edac_mc_find((int)node_id);
 
 err_no_match:
        debugf2("sys_addr 0x%lx doesn't match any node\n",
@@ -393,6 +351,9 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
 #define for_each_chip_select(i, dct, pvt) \
        for (i = 0; i < pvt->csels[dct].b_cnt; i++)
 
+#define chip_select_base(i, dct, pvt) \
+       pvt->csels[dct].csbases[i]
+
 #define for_each_chip_select_mask(i, dct, pvt) \
        for (i = 0; i < pvt->csels[dct].m_cnt; i++)
 
@@ -642,7 +603,7 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
 static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
 {
        struct amd64_pvt *pvt;
-       int node_id, intlv_shift;
+       unsigned node_id, intlv_shift;
        u64 bits, dram_addr;
        u32 intlv_sel;
 
@@ -656,10 +617,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
         */
        pvt = mci->pvt_info;
        node_id = pvt->mc_node_id;
-       BUG_ON((node_id < 0) || (node_id > 7));
 
-       intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
+       BUG_ON(node_id > 7);
 
+       intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
        if (intlv_shift == 0) {
                debugf1("    InputAddr 0x%lx translates to DramAddr of "
                        "same value\n", (unsigned long)input_addr);
@@ -788,11 +749,6 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
 
 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
 
-static u16 extract_syndrome(struct err_regs *err)
-{
-       return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
-}
-
 /*
  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
  * are ECC capable.
@@ -812,8 +768,7 @@ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
        return edac_cap;
 }
 
-
-static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
+static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
 
 static void amd64_dump_dramcfg_low(u32 dclr, int chan)
 {
@@ -843,11 +798,11 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
        debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
 
        debugf1("  NB two channel DRAM capable: %s\n",
-               (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
+               (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
 
        debugf1("  ECC capable: %s, ChipKill ECC capable: %s\n",
-               (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
-               (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
+               (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
+               (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
 
        amd64_dump_dramcfg_low(pvt->dclr0, 0);
 
@@ -861,15 +816,15 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
 
        debugf1("  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
 
-       amd64_debug_display_dimm_sizes(0, pvt);
+       amd64_debug_display_dimm_sizes(pvt, 0);
 
        /* everything below this point is Fam10h and above */
        if (boot_cpu_data.x86 == 0xf)
                return;
 
-       amd64_debug_display_dimm_sizes(1, pvt);
+       amd64_debug_display_dimm_sizes(pvt, 1);
 
-       amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
+       amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
 
        /* Only if NOT ganged does dclr1 have valid info */
        if (!dct_ganging_enabled(pvt))
@@ -900,8 +855,8 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
        prep_chip_selects(pvt);
 
        for_each_chip_select(cs, 0, pvt) {
-               u32 reg0   = DCSB0 + (cs * 4);
-               u32 reg1   = DCSB1 + (cs * 4);
+               int reg0   = DCSB0 + (cs * 4);
+               int reg1   = DCSB1 + (cs * 4);
                u32 *base0 = &pvt->csels[0].csbases[cs];
                u32 *base1 = &pvt->csels[1].csbases[cs];
 
@@ -918,8 +873,8 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
        }
 
        for_each_chip_select_mask(cs, 0, pvt) {
-               u32 reg0   = DCSM0 + (cs * 4);
-               u32 reg1   = DCSM1 + (cs * 4);
+               int reg0   = DCSM0 + (cs * 4);
+               int reg1   = DCSM1 + (cs * 4);
                u32 *mask0 = &pvt->csels[0].csmasks[cs];
                u32 *mask1 = &pvt->csels[1].csmasks[cs];
 
@@ -964,7 +919,7 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
 
        if (pvt->ext_model >= K8_REV_F)
                /* RevF (NPT) and later */
-               flag = pvt->dclr0 & F10_WIDTH_128;
+               flag = pvt->dclr0 & WIDTH_128;
        else
                /* RevE and earlier */
                flag = pvt->dclr0 & REVE_WIDTH_128;
@@ -975,17 +930,23 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
        return (flag) ? 2 : 1;
 }
 
-/* Extract the ERROR ADDRESS for the K8 CPUs */
-static u64 k8_get_error_address(struct mem_ctl_info *mci,
-                               struct err_regs *info)
+/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
+static u64 get_error_address(struct mce *m)
 {
-       return (((u64) (info->nbeah & 0xff)) << 32) +
-                       (info->nbeal & ~0x03);
+       u8 start_bit = 1;
+       u8 end_bit   = 47;
+
+       if (boot_cpu_data.x86 == 0xf) {
+               start_bit = 3;
+               end_bit   = 39;
+       }
+
+       return m->addr & GENMASK(start_bit, end_bit);
 }
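
Note that GENMASK() here is the driver-local helper from amd64_edac.h, whose argument order is (lo, hi), the same convention as the GENMASK(6, 47) and GENMASK(23, 47) uses further down in this diff. Assuming the usual shape of that helper (an assumption, the header is not part of this diff), the two cases work out to:

    /* driver-local mask helper, assumed definition, lo..hi inclusive */
    #define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

    /* K8:    m->addr & GENMASK(3, 39)  keeps bits [39:3]
     *        == m->addr & 0x000000fffffffff8ULL
     * F10h+: m->addr & GENMASK(1, 47)  keeps bits [47:1]
     *        == m->addr & 0x0000fffffffffffeULL
     */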
 
 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
 {
-       u32 off = range << 3;
+       int off = range << 3;
 
        amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
        amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
@@ -1000,18 +961,16 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
        amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
 }
 
-static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
-                                   struct err_regs *err_info, u64 sys_addr)
+static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
+                                   u16 syndrome)
 {
        struct mem_ctl_info *src_mci;
+       struct amd64_pvt *pvt = mci->pvt_info;
        int channel, csrow;
        u32 page, offset;
-       u16 syndrome;
-
-       syndrome = extract_syndrome(err_info);
 
        /* CHIPKILL enabled */
-       if (err_info->nbcfg & K8_NBCFG_CHIPKILL) {
+       if (pvt->nbcfg & NBCFG_CHIPKILL) {
                channel = get_channel_from_ecc_syndrome(mci, syndrome);
                if (channel < 0) {
                        /*
@@ -1060,18 +1019,41 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
        }
 }
 
-static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
+static int ddr2_cs_size(unsigned i, bool dct_width)
 {
-       int *dbam_map;
+       unsigned shift = 0;
 
-       if (pvt->ext_model >= K8_REV_F)
-               dbam_map = ddr2_dbam;
-       else if (pvt->ext_model >= K8_REV_D)
-               dbam_map = ddr2_dbam_revD;
+       if (i <= 2)
+               shift = i;
+       else if (!(i & 0x1))
+               shift = i >> 1;
        else
-               dbam_map = ddr2_dbam_revCG;
+               shift = (i + 1) >> 1;
 
-       return dbam_map[cs_mode];
+       return 128 << (shift + !!dct_width);
+}
+
+static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+                                 unsigned cs_mode)
+{
+       u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
+
+       if (pvt->ext_model >= K8_REV_F) {
+               WARN_ON(cs_mode > 11);
+               return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
+       }
+       else if (pvt->ext_model >= K8_REV_D) {
+               WARN_ON(cs_mode > 10);
+
+               if (cs_mode == 3 || cs_mode == 8)
+                       return 32 << (cs_mode - 1);
+               else
+                       return 32 << cs_mode;
+       }
+       else {
+               WARN_ON(cs_mode > 6);
+               return 32 << cs_mode;
+       }
 }
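
For the rev-F (DDR2) path, ddr2_cs_size() is the removed ddr2_dbam[] table in closed form: a 64-bit DCT yields 128, 256, 512, 512, 512, 1024, 1024, 2048, 2048, 4096, 4096 and 8192 MB for cs_mode 0..11, and a 128-bit DCT (WIDTH_128) doubles each value. A quick stand-alone check, duplicating the function body from the hunk above:

    #include <stdio.h>
    #include <stdbool.h>

    static int ddr2_cs_size(unsigned i, bool dct_width)
    {
            unsigned shift = 0;

            if (i <= 2)
                    shift = i;
            else if (!(i & 0x1))
                    shift = i >> 1;
            else
                    shift = (i + 1) >> 1;

            return 128 << (shift + !!dct_width);
    }

    int main(void)
    {
            /* the removed ddr2_dbam[] table, flattened for comparison */
            static const int ddr2_dbam[] = { 128, 256, 512, 512, 512,
                                             1024, 1024, 2048, 2048,
                                             4096, 4096, 8192 };
            unsigned i;

            for (i = 0; i <= 11; i++)
                    printf("cs_mode %2u: %5d MB (table: %5d)\n",
                           i, ddr2_cs_size(i, false), ddr2_dbam[i]);
            return 0;
    }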
 
 /*
@@ -1082,15 +1064,13 @@ static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
  * Pass back:
  *     contents of the DCL0_LOW register
  */
-static int f10_early_channel_count(struct amd64_pvt *pvt)
+static int f1x_early_channel_count(struct amd64_pvt *pvt)
 {
        int i, j, channels = 0;
 
-       /* If we are in 128 bit mode, then we are using 2 channels */
-       if (pvt->dclr0 & F10_WIDTH_128) {
-               channels = 2;
-               return channels;
-       }
+       /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
+       if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
+               return 2;
 
        /*
         * Need to check if in unganged mode: In such, there are 2 channels,
@@ -1126,35 +1106,64 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
        return channels;
 }
 
-static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
+static int ddr3_cs_size(unsigned i, bool dct_width)
 {
-       int *dbam_map;
+       unsigned shift = 0;
+       int cs_size = 0;
 
-       if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
-               dbam_map = ddr3_dbam;
+       if (i == 0 || i == 3 || i == 4)
+               cs_size = -1;
+       else if (i <= 2)
+               shift = i;
+       else if (i == 12)
+               shift = 7;
+       else if (!(i & 0x1))
+               shift = i >> 1;
        else
-               dbam_map = ddr2_dbam;
+               shift = (i + 1) >> 1;
+
+       if (cs_size != -1)
+               cs_size = (128 * (1 << !!dct_width)) << shift;
 
-       return dbam_map[cs_mode];
+       return cs_size;
 }
 
-static u64 f10_get_error_address(struct mem_ctl_info *mci,
-                       struct err_regs *info)
+static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+                                  unsigned cs_mode)
 {
-       return (((u64) (info->nbeah & 0xffff)) << 32) +
-                       (info->nbeal & ~0x01);
+       u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
+
+       WARN_ON(cs_mode > 11);
+
+       if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
+               return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
+       else
+               return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
+}
+
+/*
+ * F15h supports only 64bit DCT interfaces
+ */
+static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+                                  unsigned cs_mode)
+{
+       WARN_ON(cs_mode > 12);
+
+       return ddr3_cs_size(cs_mode, false);
 }
 
-static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
+static void read_dram_ctl_register(struct amd64_pvt *pvt)
 {
 
+       if (boot_cpu_data.x86 == 0xf)
+               return;
+
        if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
                debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
                        pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
 
-               debugf0("  mode: %s, All DCTs on: %s\n",
-                       (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
-                       (dct_dram_enabled(pvt) ? "yes"   : "no"));
+               debugf0("  DCTs operate in %s mode.\n",
+                       (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
 
                if (!dct_ganging_enabled(pvt))
                        debugf0("  Address range split per DCT: %s\n",
@@ -1178,10 +1187,10 @@ static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
  * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
  * Interleaving Modes.
  */
-static u8 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
+static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
                                bool hi_range_sel, u8 intlv_en)
 {
-       u32 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
+       u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
 
        if (dct_ganging_enabled(pvt))
                return 0;
@@ -1216,14 +1225,13 @@ static u8 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
 }
 
 /* Convert the sys_addr to the normalized DCT address */
-static u64 f10_get_norm_dct_addr(struct amd64_pvt *pvt, int range,
+static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
                                 u64 sys_addr, bool hi_rng,
                                 u32 dct_sel_base_addr)
 {
        u64 chan_off;
        u64 dram_base           = get_dram_base(pvt, range);
        u64 hole_off            = f10_dhar_offset(pvt);
-       u32 hole_valid          = dhar_valid(pvt);
        u64 dct_sel_base_off    = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
 
        if (hi_rng) {
@@ -1240,7 +1248,7 @@ static u64 f10_get_norm_dct_addr(struct amd64_pvt *pvt, int range,
                 */
                if ((!(dct_sel_base_addr >> 16) ||
                     dct_sel_base_addr < dhar_base(pvt)) &&
-                   hole_valid &&
+                   dhar_valid(pvt) &&
                    (sys_addr >= BIT_64(32)))
                        chan_off = hole_off;
                else
@@ -1255,7 +1263,7 @@ static u64 f10_get_norm_dct_addr(struct amd64_pvt *pvt, int range,
                 * else
                 *      remove dram base to normalize to DCT address
                 */
-               if (hole_valid && (sys_addr >= BIT_64(32)))
+               if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
                        chan_off = hole_off;
                else
                        chan_off = dram_base;
@@ -1264,30 +1272,23 @@ static u64 f10_get_norm_dct_addr(struct amd64_pvt *pvt, int range,
        return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47));
 }
 
-/* Hack for the time being - Can we get this from BIOS?? */
-#define        CH0SPARE_RANK   0
-#define        CH1SPARE_RANK   1
-
 /*
  * checks if the csrow passed in is marked as SPARED, if so returns the new
  * spare row
  */
 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
 {
-       u32 swap_done;
-       u32 bad_dram_cs;
+       int tmp_cs;
 
-       /* Depending on channel, isolate respective SPARING info */
-       if (dct) {
-               swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
-               bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
-               if (swap_done && (csrow == bad_dram_cs))
-                       csrow = CH1SPARE_RANK;
-       } else {
-               swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
-               bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
-               if (swap_done && (csrow == bad_dram_cs))
-                       csrow = CH0SPARE_RANK;
+       if (online_spare_swap_done(pvt, dct) &&
+           csrow == online_spare_bad_dramcs(pvt, dct)) {
+
+               for_each_chip_select(tmp_cs, dct, pvt) {
+                       if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
+                               csrow = tmp_cs;
+                               break;
+                       }
+               }
        }
        return csrow;
 }
@@ -1300,7 +1301,7 @@ static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
  *     -EINVAL:  NOT FOUND
  *     0..csrow = Chip-Select Row
  */
-static int f10_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
+static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
 {
        struct mem_ctl_info *mci;
        struct amd64_pvt *pvt;
@@ -1341,13 +1342,49 @@ static int f10_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
        return cs_found;
 }
 
+/*
+ * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
+ * swapped with a region located at the bottom of memory so that the GPU can use
+ * the interleaved region and thus two channels.
+ */
+static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
+{
+       u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
+
+       if (boot_cpu_data.x86 == 0x10) {
+               /* only revC3 and revE have that feature */
+               if (boot_cpu_data.x86_model < 4 ||
+                   (boot_cpu_data.x86_model < 0xa &&
+                    boot_cpu_data.x86_mask < 3))
+                       return sys_addr;
+       }
+
+       amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
+
+       if (!(swap_reg & 0x1))
+               return sys_addr;
+
+       swap_base       = (swap_reg >> 3) & 0x7f;
+       swap_limit      = (swap_reg >> 11) & 0x7f;
+       rgn_size        = (swap_reg >> 20) & 0x7f;
+       tmp_addr        = sys_addr >> 27;
+
+       if (!(sys_addr >> 34) &&
+           (((tmp_addr >= swap_base) &&
+            (tmp_addr <= swap_limit)) ||
+            (tmp_addr < rgn_size)))
+               return sys_addr ^ (u64)swap_base << 27;
+
+       return sys_addr;
+}
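
The F2x10C fields are all in 128MB granularity (hence the >> 27 comparisons, and the >> 34 test that the address lies below the 16G boundary), and a single XOR with swap_base << 27 swaps the two regions in either direction. A worked example with hypothetical register contents:

    /*
     * Hypothetical F2x10C: swap enabled, swap_base = 0x10 (2GB),
     * swap_limit = 0x17, rgn_size = 0x08 (1GB).
     *
     * sys_addr = 0x10000000 (256MB):  tmp_addr = 0x02 < rgn_size,
     *   so 0x10000000 ^ (0x10 << 27) = 0x90000000 (2.25GB).
     *
     * sys_addr = 0x90000000 (2.25GB): tmp_addr = 0x12 lies within
     *   [swap_base, swap_limit], so it maps back to 0x10000000.
     */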
+
 /* For a given @dram_range, check if @sys_addr falls within it. */
-static int f10_match_to_this_node(struct amd64_pvt *pvt, int range,
+static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
                                  u64 sys_addr, int *nid, int *chan_sel)
 {
        int cs_found = -EINVAL;
        u64 chan_addr;
-       u32 tmp, dct_sel_base;
+       u32 dct_sel_base;
        u8 channel;
        bool high_range = false;
 
@@ -1358,9 +1395,22 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int range,
        debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
                range, sys_addr, get_dram_limit(pvt, range));
 
+       if (dhar_valid(pvt) &&
+           dhar_base(pvt) <= sys_addr &&
+           sys_addr < BIT_64(32)) {
+               amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
+                           sys_addr);
+               return -EINVAL;
+       }
+
        if (intlv_en &&
-           (intlv_sel != ((sys_addr >> 12) & intlv_en)))
+           (intlv_sel != ((sys_addr >> 12) & intlv_en))) {
+               amd64_warn("Botched intlv bits, en: 0x%x, sel: 0x%x\n",
+                          intlv_en, intlv_sel);
                return -EINVAL;
+       }
+
+       sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
 
        dct_sel_base = dct_sel_baseaddr(pvt);
 
@@ -1373,31 +1423,39 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int range,
           ((sys_addr >> 27) >= (dct_sel_base >> 11)))
                high_range = true;
 
-       channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
+       channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
 
-       chan_addr = f10_get_norm_dct_addr(pvt, range, sys_addr,
+       chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
                                          high_range, dct_sel_base);
 
-       /* remove Node ID (in case of node interleaving) */
-       tmp = chan_addr & 0xFC0;
-
-       chan_addr = ((chan_addr >> hweight8(intlv_en)) & GENMASK(12, 47)) | tmp;
+       /* Remove node interleaving, see F1x120 */
+       if (intlv_en)
+               chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
+                           (chan_addr & 0xfff);
 
-       /* remove channel interleave and hash */
+       /* remove channel interleave */
        if (dct_interleave_enabled(pvt) &&
           !dct_high_range_enabled(pvt) &&
           !dct_ganging_enabled(pvt)) {
-               if (dct_sel_interleave_addr(pvt) != 1)
-                       chan_addr = (chan_addr >> 1) & GENMASK(6, 63);
-               else {
-                       tmp = chan_addr & 0xFC0;
-                       chan_addr = ((chan_addr & GENMASK(14, 63)) >> 1) | tmp;
-               }
+
+               if (dct_sel_interleave_addr(pvt) != 1) {
+                       if (dct_sel_interleave_addr(pvt) == 0x3)
+                               /* hash 9 */
+                               chan_addr = ((chan_addr >> 10) << 9) |
+                                            (chan_addr & 0x1ff);
+                       else
+                               /* A[6] or hash 6 */
+                               chan_addr = ((chan_addr >> 7) << 6) |
+                                            (chan_addr & 0x3f);
+               } else
+                       /* A[12] */
+                       chan_addr = ((chan_addr >> 13) << 12) |
+                                    (chan_addr & 0xfff);
        }
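
Each branch above deletes the interleave-select bit(s) from chan_addr by shifting the upper part of the address down over them while keeping the lower part in place. For the A[12] case, for instance:

    /*
     * Deleting bit 12 from chan_addr = 0x12345:
     *
     *   (0x12345 >> 13) << 12  = 0x9000
     *    0x12345 & 0xfff       = 0x345
     *   result                 = 0x9345
     *
     * i.e. bits [63:13] move down onto [62:12] and bits [11:0] stay
     * put. The node-interleave removal above (see F1x120) does the
     * same with hweight8(intlv_en) bits above bit 12.
     */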
 
-       debugf1("   (ChannelAddrLong=0x%llx)\n", chan_addr);
+       debugf1("   Normalized DCT addr: 0x%llx\n", chan_addr);
 
-       cs_found = f10_lookup_addr_in_dct(chan_addr, node_id, channel);
+       cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
 
        if (cs_found >= 0) {
                *nid = node_id;
@@ -1406,10 +1464,11 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int range,
        return cs_found;
 }
 
-static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
+static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
                                       int *node, int *chan_sel)
 {
-       int range, cs_found = -EINVAL;
+       int cs_found = -EINVAL;
+       unsigned range;
 
        for (range = 0; range < DRAM_RANGES; range++) {
 
@@ -1419,7 +1478,7 @@ static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
                if ((get_dram_base(pvt, range)  <= sys_addr) &&
                    (get_dram_limit(pvt, range) >= sys_addr)) {
 
-                       cs_found = f10_match_to_this_node(pvt, range,
+                       cs_found = f1x_match_to_this_node(pvt, range,
                                                          sys_addr, node,
                                                          chan_sel);
                        if (cs_found >= 0)
@@ -1436,16 +1495,14 @@ static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
  * The @sys_addr is usually an error address received from the hardware
  * (MCX_ADDR).
  */
-static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
-                                    struct err_regs *err_info,
-                                    u64 sys_addr)
+static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
+                                    u16 syndrome)
 {
        struct amd64_pvt *pvt = mci->pvt_info;
        u32 page, offset;
        int nid, csrow, chan = 0;
-       u16 syndrome;
 
-       csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
+       csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
 
        if (csrow < 0) {
                edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
@@ -1454,14 +1511,12 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
 
        error_address_to_page_and_offset(sys_addr, &page, &offset);
 
-       syndrome = extract_syndrome(err_info);
-
        /*
         * We need the syndromes for channel detection only when we're
         * ganged. Otherwise @chan should already contain the channel at
         * this point.
         */
-       if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
+       if (dct_ganging_enabled(pvt))
                chan = get_channel_from_ecc_syndrome(mci, syndrome);
 
        if (chan >= 0)
@@ -1480,14 +1535,14 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
  * debug routine to display the memory sizes of all logical DIMMs and their
  * CSROWs
  */
-static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
+static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
 {
        int dimm, size0, size1, factor = 0;
        u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
        u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;
 
        if (boot_cpu_data.x86 == 0xf) {
-               if (pvt->dclr0 & F10_WIDTH_128)
+               if (pvt->dclr0 & WIDTH_128)
                        factor = 1;
 
                /* K8 families < revF not supported yet */
@@ -1510,11 +1565,13 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
 
                size0 = 0;
                if (dcsb[dimm*2] & DCSB_CS_ENABLE)
-                       size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
+                       size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
+                                                    DBAM_DIMM(dimm, dbam));
 
                size1 = 0;
                if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
-                       size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
+                       size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
+                                                    DBAM_DIMM(dimm, dbam));
 
                amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
                                dimm * 2,     size0 << factor,
@@ -1529,7 +1586,6 @@ static struct amd64_family_type amd64_family_types[] = {
                .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
                .ops = {
                        .early_channel_count    = k8_early_channel_count,
-                       .get_error_address      = k8_get_error_address,
                        .map_sysaddr_to_csrow   = k8_map_sysaddr_to_csrow,
                        .dbam_to_cs             = k8_dbam_to_chip_select,
                        .read_dct_pci_cfg       = k8_read_dct_pci_cfg,
@@ -1540,17 +1596,20 @@ static struct amd64_family_type amd64_family_types[] = {
                .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
                .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
                .ops = {
-                       .early_channel_count    = f10_early_channel_count,
-                       .get_error_address      = f10_get_error_address,
-                       .read_dram_ctl_register = f10_read_dram_ctl_register,
-                       .map_sysaddr_to_csrow   = f10_map_sysaddr_to_csrow,
+                       .early_channel_count    = f1x_early_channel_count,
+                       .map_sysaddr_to_csrow   = f1x_map_sysaddr_to_csrow,
                        .dbam_to_cs             = f10_dbam_to_chip_select,
                        .read_dct_pci_cfg       = f10_read_dct_pci_cfg,
                }
        },
        [F15_CPUS] = {
                .ctl_name = "F15h",
+               .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
+               .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
                .ops = {
+                       .early_channel_count    = f1x_early_channel_count,
+                       .map_sysaddr_to_csrow   = f1x_map_sysaddr_to_csrow,
+                       .dbam_to_cs             = f15_dbam_to_chip_select,
                        .read_dct_pci_cfg       = f15_read_dct_pci_cfg,
                }
        },
@@ -1721,51 +1780,50 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
        struct amd64_pvt *pvt = mci->pvt_info;
        int err_sym = -1;
 
-       if (pvt->syn_type == 8)
+       if (pvt->ecc_sym_sz == 8)
                err_sym = decode_syndrome(syndrome, x8_vectors,
                                          ARRAY_SIZE(x8_vectors),
-                                         pvt->syn_type);
-       else if (pvt->syn_type == 4)
+                                         pvt->ecc_sym_sz);
+       else if (pvt->ecc_sym_sz == 4)
                err_sym = decode_syndrome(syndrome, x4_vectors,
                                          ARRAY_SIZE(x4_vectors),
-                                         pvt->syn_type);
+                                         pvt->ecc_sym_sz);
        else {
-               amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);
+               amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
                return err_sym;
        }
 
-       return map_err_sym_to_channel(err_sym, pvt->syn_type);
+       return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
 }
 
 /*
  * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
  * ADDRESS and process.
  */
-static void amd64_handle_ce(struct mem_ctl_info *mci,
-                           struct err_regs *info)
+static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
 {
        struct amd64_pvt *pvt = mci->pvt_info;
        u64 sys_addr;
+       u16 syndrome;
 
        /* Ensure that the Error Address is VALID */
-       if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+       if (!(m->status & MCI_STATUS_ADDRV)) {
                amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
                edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
                return;
        }
 
-       sys_addr = pvt->ops->get_error_address(mci, info);
+       sys_addr = get_error_address(m);
+       syndrome = extract_syndrome(m->status);
 
        amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
 
-       pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
+       pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);
 }
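
extract_syndrome() now takes the whole MC4_STATUS word. The removed err_regs variant above assembled the syndrome from NBSH[22:15] and NBSL[31:24], which sit at status bits [54:47] and [31:24] once nbsh = status >> 32 is substituted. The replacement lives in amd64_edac.h (not part of this diff) and presumably reduces to something like:

    /* assumed shape of the new helper, equivalent to the removed
     * nbsh/nbsl arithmetic */
    #define extract_syndrome(u64) ((((u64) >> 47) & 0xff) | \
                                   (((u64) >> 16) & 0xff00))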
 
 /* Handle any Un-correctable Errors (UEs) */
-static void amd64_handle_ue(struct mem_ctl_info *mci,
-                           struct err_regs *info)
+static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
 {
-       struct amd64_pvt *pvt = mci->pvt_info;
        struct mem_ctl_info *log_mci, *src_mci = NULL;
        int csrow;
        u64 sys_addr;
@@ -1773,13 +1831,13 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
 
        log_mci = mci;
 
-       if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+       if (!(m->status & MCI_STATUS_ADDRV)) {
                amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
                edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
                return;
        }
 
-       sys_addr = pvt->ops->get_error_address(mci, info);
+       sys_addr = get_error_address(m);
 
        /*
         * Find out which node the error address belongs to. This may be
@@ -1807,14 +1865,14 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
 }
 
 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
-                                           struct err_regs *info)
+                                           struct mce *m)
 {
-       u16 ec = EC(info->nbsl);
-       u8 xec = XEC(info->nbsl, 0x1f);
-       int ecc_type = (info->nbsh >> 13) & 0x3;
+       u16 ec = EC(m->status);
+       u8 xec = XEC(m->status, 0x1f);
+       u8 ecc_type = (m->status >> 45) & 0x3;
 
        /* Bail early out if this was an 'observed' error */
-       if (PP(ec) == K8_NBSL_PP_OBS)
+       if (PP(ec) == NBSL_PP_OBS)
                return;
 
        /* Do only ECC errors */
@@ -1822,34 +1880,16 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
                return;
 
        if (ecc_type == 2)
-               amd64_handle_ce(mci, info);
+               amd64_handle_ce(mci, m);
        else if (ecc_type == 1)
-               amd64_handle_ue(mci, info);
+               amd64_handle_ue(mci, m);
 }
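
The ecc_type extraction is the same MC4_STATUS field as before, just read from the raw 64-bit word:

    /* old: (nbsh >> 13) & 0x3, with nbsh = (u32)(status >> 32)
     * new: (m->status >> 45) & 0x3, the same MC4_STATUS[46:45] field;
     * 2 denotes a correctable and 1 an uncorrectable ECC error, as
     * the two calls above show.
     */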
 
 void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
 {
        struct mem_ctl_info *mci = mcis[node_id];
-       struct err_regs regs;
-
-       regs.nbsl  = (u32) m->status;
-       regs.nbsh  = (u32)(m->status >> 32);
-       regs.nbeal = (u32) m->addr;
-       regs.nbeah = (u32)(m->addr >> 32);
-       regs.nbcfg = nbcfg;
-
-       __amd64_decode_bus_error(mci, &regs);
-
-       /*
-        * Check the UE bit of the NB status high register, if set generate some
-        * logs. If NOT a GART error, then process the event as a NO-INFO event.
-        * If it was a GART error, skip that process.
-        *
-        * FIXME: this should go somewhere else, if at all.
-        */
-       if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
-               edac_mc_handle_ue_no_info(mci, "UE bit is set");
 
+       __amd64_decode_bus_error(mci, m);
 }
 
 /*
@@ -1898,9 +1938,10 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt)
  */
 static void read_mc_regs(struct amd64_pvt *pvt)
 {
+       struct cpuinfo_x86 *c = &boot_cpu_data;
        u64 msr_val;
        u32 tmp;
-       int range;
+       unsigned range;
 
        /*
         * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
@@ -1917,10 +1958,9 @@ static void read_mc_regs(struct amd64_pvt *pvt)
        } else
                debugf0("  TOP_MEM2 disabled.\n");
 
-       amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap);
+       amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
 
-       if (pvt->ops->read_dram_ctl_register)
-               pvt->ops->read_dram_ctl_register(pvt);
+       read_dram_ctl_register(pvt);
 
        for (range = 0; range < DRAM_RANGES; range++) {
                u8 rw;
@@ -1960,19 +2000,16 @@ static void read_mc_regs(struct amd64_pvt *pvt)
                amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
        }
 
-       if (boot_cpu_data.x86 >= 0x10) {
+       pvt->ecc_sym_sz = 4;
+
+       if (c->x86 >= 0x10) {
                amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
                amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
-       }
-
-       if (boot_cpu_data.x86 == 0x10 &&
-           boot_cpu_data.x86_model > 7 &&
-           /* F3x180[EccSymbolSize]=1 => x8 symbols */
-           tmp & BIT(25))
-               pvt->syn_type = 8;
-       else
-               pvt->syn_type = 4;
 
+               /* F10h, revD and later can do x8 ECC too */
+               if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
+                       pvt->ecc_sym_sz = 8;
+       }
        dump_misc_regs(pvt);
 }
 
@@ -2010,7 +2047,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
  *     encompasses
  *
  */
-static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
+static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
 {
        u32 cs_mode, nr_pages;
 
@@ -2023,7 +2060,7 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
         */
        cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
 
-       nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
+       nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
 
        /*
         * If dual channel then double the memory size of single channel.
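
DBAM packs one 4-bit cs_mode per DIMM, i.e. per pair of csrows, which is what the (csrow_nr / 2) * 4 shift above extracts. A worked example with a hypothetical register value:

    /*
     * dbam0 = 0x00002310 describes four DIMMs on DCT0:
     *
     *   csrows 0/1 -> nibble 0 -> cs_mode 0x0
     *   csrows 2/3 -> nibble 1 -> cs_mode 0x1
     *   csrows 4/5 -> nibble 2 -> cs_mode 0x3
     *   csrows 6/7 -> nibble 3 -> cs_mode 0x2
     *
     * dbam_to_cs() turns each cs_mode into MB, and << (20 - PAGE_SHIFT)
     * converts MB to pages.
     */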
@@ -2050,14 +2087,13 @@ static int init_csrows(struct mem_ctl_info *mci)
        u32 val;
        int i, empty = 1;
 
-       amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val);
+       amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
 
        pvt->nbcfg = val;
-       pvt->ctl_error_info.nbcfg = val;
 
        debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
                pvt->mc_node_id, val,
-               !!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE));
+               !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
 
        for_each_chip_select(i, 0, pvt) {
                csrow = &mci->csrows[i];
@@ -2072,7 +2108,7 @@ static int init_csrows(struct mem_ctl_info *mci)
                        i, pvt->mc_node_id);
 
                empty = 0;
-               csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
+               csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
                find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
                sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
                csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
@@ -2099,9 +2135,9 @@ static int init_csrows(struct mem_ctl_info *mci)
                /*
                 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
                 */
-               if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
+               if (pvt->nbcfg & NBCFG_ECC_ENABLE)
                        csrow->edac_mode =
-                           (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
+                           (pvt->nbcfg & NBCFG_CHIPKILL) ?
                            EDAC_S4ECD4ED : EDAC_SECDED;
                else
                        csrow->edac_mode = EDAC_NONE;
@@ -2111,7 +2147,7 @@ static int init_csrows(struct mem_ctl_info *mci)
 }
 
 /* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
 {
        int cpu;
 
@@ -2121,7 +2157,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
 }
 
 /* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
 {
        cpumask_var_t mask;
        int cpu, nbe;
@@ -2138,7 +2174,7 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
 
        for_each_cpu(cpu, mask) {
                struct msr *reg = per_cpu_ptr(msrs, cpu);
-               nbe = reg->l & K8_MSR_MCGCTL_NBE;
+               nbe = reg->l & MSR_MCGCTL_NBE;
 
                debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
                        cpu, reg->q,
@@ -2173,16 +2209,16 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
                struct msr *reg = per_cpu_ptr(msrs, cpu);
 
                if (on) {
-                       if (reg->l & K8_MSR_MCGCTL_NBE)
+                       if (reg->l & MSR_MCGCTL_NBE)
                                s->flags.nb_mce_enable = 1;
 
-                       reg->l |= K8_MSR_MCGCTL_NBE;
+                       reg->l |= MSR_MCGCTL_NBE;
                } else {
                        /*
                         * Turn off NB MCE reporting only when it was off before
                         */
                        if (!s->flags.nb_mce_enable)
-                               reg->l &= ~K8_MSR_MCGCTL_NBE;
+                               reg->l &= ~MSR_MCGCTL_NBE;
                }
        }
        wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
@@ -2196,40 +2232,38 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
                                       struct pci_dev *F3)
 {
        bool ret = true;
-       u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+       u32 value, mask = 0x3;          /* UECC/CECC enable */
 
        if (toggle_ecc_err_reporting(s, nid, ON)) {
                amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
                return false;
        }
 
-       amd64_read_pci_cfg(F3, K8_NBCTL, &value);
+       amd64_read_pci_cfg(F3, NBCTL, &value);
 
-       /* turn on UECCEn and CECCEn bits */
        s->old_nbctl   = value & mask;
        s->nbctl_valid = true;
 
        value |= mask;
-       amd64_write_pci_cfg(F3, K8_NBCTL, value);
+       amd64_write_pci_cfg(F3, NBCTL, value);
 
-       amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+       amd64_read_pci_cfg(F3, NBCFG, &value);
 
-       debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
-               nid, value,
-               !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
+       debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+               nid, value, !!(value & NBCFG_ECC_ENABLE));
 
-       if (!(value & K8_NBCFG_ECC_ENABLE)) {
+       if (!(value & NBCFG_ECC_ENABLE)) {
                amd64_warn("DRAM ECC disabled on this node, enabling...\n");
 
                s->flags.nb_ecc_prev = 0;
 
                /* Attempt to turn on DRAM ECC Enable */
-               value |= K8_NBCFG_ECC_ENABLE;
-               amd64_write_pci_cfg(F3, K8_NBCFG, value);
+               value |= NBCFG_ECC_ENABLE;
+               amd64_write_pci_cfg(F3, NBCFG, value);
 
-               amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+               amd64_read_pci_cfg(F3, NBCFG, &value);
 
-               if (!(value & K8_NBCFG_ECC_ENABLE)) {
+               if (!(value & NBCFG_ECC_ENABLE)) {
                        amd64_warn("Hardware rejected DRAM ECC enable,"
                                   "check memory DIMM configuration.\n");
                        ret = false;
@@ -2240,9 +2274,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
                s->flags.nb_ecc_prev = 1;
        }
 
-       debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
-               nid, value,
-               !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
+       debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+               nid, value, !!(value & NBCFG_ECC_ENABLE));
 
        return ret;
 }
@@ -2250,22 +2283,23 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
 static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
                                        struct pci_dev *F3)
 {
-       u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+       u32 value, mask = 0x3;          /* UECC/CECC enable */
 
        if (!s->nbctl_valid)
                return;
 
-       amd64_read_pci_cfg(F3, K8_NBCTL, &value);
+       amd64_read_pci_cfg(F3, NBCTL, &value);
        value &= ~mask;
        value |= s->old_nbctl;
 
-       amd64_write_pci_cfg(F3, K8_NBCTL, value);
+       amd64_write_pci_cfg(F3, NBCTL, value);
 
        /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
        if (!s->flags.nb_ecc_prev) {
-               amd64_read_pci_cfg(F3, K8_NBCFG, &value);
-               value &= ~K8_NBCFG_ECC_ENABLE;
-               amd64_write_pci_cfg(F3, K8_NBCFG, value);
+               amd64_read_pci_cfg(F3, NBCFG, &value);
+               value &= ~NBCFG_ECC_ENABLE;
+               amd64_write_pci_cfg(F3, NBCFG, value);
        }
 
        /* restore the NB Enable MCGCTL bit */
@@ -2291,9 +2325,9 @@ static bool ecc_enabled(struct pci_dev *F3, u8 nid)
        u8 ecc_en = 0;
        bool nb_mce_en = false;
 
-       amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+       amd64_read_pci_cfg(F3, NBCFG, &value);
 
-       ecc_en = !!(value & K8_NBCFG_ECC_ENABLE);
+       ecc_en = !!(value & NBCFG_ECC_ENABLE);
        amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
 
        nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
@@ -2331,23 +2365,24 @@ static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
        mci->mc_driver_sysfs_attributes = sysfs_attrs;
 }
 
-static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
+static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
+                                struct amd64_family_type *fam)
 {
        struct amd64_pvt *pvt = mci->pvt_info;
 
        mci->mtype_cap          = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
        mci->edac_ctl_cap       = EDAC_FLAG_NONE;
 
-       if (pvt->nbcap & K8_NBCAP_SECDED)
+       if (pvt->nbcap & NBCAP_SECDED)
                mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
 
-       if (pvt->nbcap & K8_NBCAP_CHIPKILL)
+       if (pvt->nbcap & NBCAP_CHIPKILL)
                mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
 
        mci->edac_cap           = amd64_determine_edac_cap(pvt);
        mci->mod_name           = EDAC_MOD_STR;
        mci->mod_ver            = EDAC_AMD64_VERSION;
-       mci->ctl_name           = pvt->ctl_name;
+       mci->ctl_name           = fam->ctl_name;
        mci->dev_name           = pci_name(pvt->F2);
        mci->ctl_page_to_phys   = NULL;
 
@@ -2368,14 +2403,16 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
        case 0xf:
                fam_type                = &amd64_family_types[K8_CPUS];
                pvt->ops                = &amd64_family_types[K8_CPUS].ops;
-               pvt->ctl_name           = fam_type->ctl_name;
-               pvt->min_scrubrate      = K8_MIN_SCRUB_RATE_BITS;
                break;
+
        case 0x10:
                fam_type                = &amd64_family_types[F10_CPUS];
                pvt->ops                = &amd64_family_types[F10_CPUS].ops;
-               pvt->ctl_name           = fam_type->ctl_name;
-               pvt->min_scrubrate      = F10_MIN_SCRUB_RATE_BITS;
+               break;
+
+       case 0x15:
+               fam_type                = &amd64_family_types[F15_CPUS];
+               pvt->ops                = &amd64_family_types[F15_CPUS].ops;
                break;
 
        default:
@@ -2385,7 +2422,7 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
 
        pvt->ext_model = boot_cpu_data.x86_model >> 4;
 
-       amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
+       amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
                     (fam == 0xf ?
                                (pvt->ext_model >= K8_REV_F  ? "revF or later "
                                                             : "revE or earlier ")
@@ -2439,7 +2476,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
        mci->pvt_info = pvt;
        mci->dev = &pvt->F2->dev;
 
-       setup_mci_misc_attrs(mci);
+       setup_mci_misc_attrs(mci, fam_type);
 
        if (init_csrows(mci))
                mci->edac_cap = EDAC_FLAG_NONE;
@@ -2582,6 +2619,15 @@ static const struct pci_device_id amd64_pci_table[] __devinitdata = {
                .class          = 0,
                .class_mask     = 0,
        },
+       {
+               .vendor         = PCI_VENDOR_ID_AMD,
+               .device         = PCI_DEVICE_ID_AMD_15H_NB_F2,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .class          = 0,
+               .class_mask     = 0,
+       },
+
        {0, }
 };
 MODULE_DEVICE_TABLE(pci, amd64_pci_table);
@@ -2622,7 +2668,7 @@ static int __init amd64_edac_init(void)
 {
        int err = -ENODEV;
 
-       edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
+       printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
 
        opstate_init();