1 /* Intel i7 core/Nehalem Memory Controller kernel module
3 * This driver supports the memory controllers found on the Intel
4 * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
5 * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
8 * This file may be distributed under the terms of the
9 * GNU General Public License version 2 only.
11 * Copyright (c) 2009-2010 by:
12 * Mauro Carvalho Chehab <mchehab@redhat.com>
14 * Red Hat Inc. http://www.redhat.com
16 * Forked and adapted from the i5400_edac driver
18 * Based on the following public Intel datasheets:
19 * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
20 * Datasheet, Volume 2:
21 * http://download.intel.com/design/processor/datashts/320835.pdf
22 * Intel Xeon Processor 5500 Series Datasheet Volume 2
23 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
25 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/pci.h>
31 #include <linux/pci_ids.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/dmi.h>
35 #include <linux/edac.h>
36 #include <linux/mmzone.h>
37 #include <linux/smp.h>
39 #include <asm/processor.h>
40 #include <asm/div64.h>
42 #include "edac_core.h"
45 static LIST_HEAD(i7core_edac_list);
46 static DEFINE_MUTEX(i7core_edac_lock);
49 static int use_pci_fixup;
50 module_param(use_pci_fixup, int, 0444);
51 MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
53 * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
54 * registers start at bus 255, and are not reported by BIOS.
55 * We currently find devices with only 2 sockets. In order to support more QPI
56 * Quick Path Interconnect, just increment this number.
58 #define MAX_SOCKET_BUSES 2
62 * Alter this version for the module when modifications are made
64 #define I7CORE_REVISION " Ver: 1.0.0"
65 #define EDAC_MOD_STR "i7core_edac"
70 #define i7core_printk(level, fmt, arg...) \
71 edac_printk(level, "i7core", fmt, ##arg)
73 #define i7core_mc_printk(mci, level, fmt, arg...) \
74 edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
77 * i7core Memory Controller Registers
80 /* OFFSETS for Device 0 Function 0 */
82 #define MC_CFG_CONTROL 0x90
83 #define MC_CFG_UNLOCK 0x02
84 #define MC_CFG_LOCK 0x00
86 /* OFFSETS for Device 3 Function 0 */
88 #define MC_CONTROL 0x48
89 #define MC_STATUS 0x4c
90 #define MC_MAX_DOD 0x64
93 * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet:
94 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
97 #define MC_TEST_ERR_RCV1 0x60
98 #define DIMM2_COR_ERR(r) ((r) & 0x7fff)
100 #define MC_TEST_ERR_RCV0 0x64
101 #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff)
102 #define DIMM0_COR_ERR(r) ((r) & 0x7fff)
104 /* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
105 #define MC_SSRCONTROL 0x48
106 #define SSR_MODE_DISABLE 0x00
107 #define SSR_MODE_ENABLE 0x01
108 #define SSR_MODE_MASK 0x03
110 #define MC_SCRUB_CONTROL 0x4c
111 #define STARTSCRUB (1 << 24)
112 #define SCRUBINTERVAL_MASK 0xffffff
114 #define MC_COR_ECC_CNT_0 0x80
115 #define MC_COR_ECC_CNT_1 0x84
116 #define MC_COR_ECC_CNT_2 0x88
117 #define MC_COR_ECC_CNT_3 0x8c
118 #define MC_COR_ECC_CNT_4 0x90
119 #define MC_COR_ECC_CNT_5 0x94
121 #define DIMM_TOP_COR_ERR(r) (((r) >> 16) & 0x7fff)
122 #define DIMM_BOT_COR_ERR(r) ((r) & 0x7fff)
125 /* OFFSETS for Devices 4,5 and 6 Function 0 */
127 #define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
128 #define THREE_DIMMS_PRESENT (1 << 24)
129 #define SINGLE_QUAD_RANK_PRESENT (1 << 23)
130 #define QUAD_RANK_PRESENT (1 << 22)
131 #define REGISTERED_DIMM (1 << 15)
#define MC_CHANNEL_MAPPER	0x60
	/*
	 * Physical read/write channel mapped to a logical channel; the field
	 * is biased by one, hence the trailing -1. Arguments are fully
	 * parenthesized so expression arguments such as "i + 1" expand
	 * correctly ((ch) * 6 instead of ch * 6).
	 */
#define RDLCH(r, ch)	((((r) >> (3 + ((ch) * 6))) & 0x07) - 1)
#define WRLCH(r, ch)	((((r) >> ((ch) * 6)) & 0x07) - 1)
137 #define MC_CHANNEL_RANK_PRESENT 0x7c
138 #define RANK_PRESENT_MASK 0xffff
140 #define MC_CHANNEL_ADDR_MATCH 0xf0
141 #define MC_CHANNEL_ERROR_MASK 0xf8
142 #define MC_CHANNEL_ERROR_INJECT 0xfc
143 #define INJECT_ADDR_PARITY 0x10
144 #define INJECT_ECC 0x08
145 #define MASK_CACHELINE 0x06
146 #define MASK_FULL_CACHELINE 0x06
147 #define MASK_MSB32_CACHELINE 0x04
148 #define MASK_LSB32_CACHELINE 0x02
149 #define NO_MASK_CACHELINE 0x00
150 #define REPEAT_EN 0x01
152 /* OFFSETS for Devices 4,5 and 6 Function 1 */
154 #define MC_DOD_CH_DIMM0 0x48
155 #define MC_DOD_CH_DIMM1 0x4c
156 #define MC_DOD_CH_DIMM2 0x50
/* Rank offset field, bits 12:10; (x) is parenthesized for macro hygiene. */
#define RANKOFFSET_MASK		((1 << 12) | (1 << 11) | (1 << 10))
#define RANKOFFSET(x)		(((x) & RANKOFFSET_MASK) >> 10)
159 #define DIMM_PRESENT_MASK (1 << 9)
160 #define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9)
161 #define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7))
162 #define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7)
163 #define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5))
164 #define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5)
165 #define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2))
166 #define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2)
167 #define MC_DOD_NUMCOL_MASK 3
168 #define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK)
170 #define MC_RANK_PRESENT 0x7c
172 #define MC_SAG_CH_0 0x80
173 #define MC_SAG_CH_1 0x84
174 #define MC_SAG_CH_2 0x88
175 #define MC_SAG_CH_3 0x8c
176 #define MC_SAG_CH_4 0x90
177 #define MC_SAG_CH_5 0x94
178 #define MC_SAG_CH_6 0x98
179 #define MC_SAG_CH_7 0x9c
181 #define MC_RIR_LIMIT_CH_0 0x40
182 #define MC_RIR_LIMIT_CH_1 0x44
183 #define MC_RIR_LIMIT_CH_2 0x48
184 #define MC_RIR_LIMIT_CH_3 0x4C
185 #define MC_RIR_LIMIT_CH_4 0x50
186 #define MC_RIR_LIMIT_CH_5 0x54
187 #define MC_RIR_LIMIT_CH_6 0x58
188 #define MC_RIR_LIMIT_CH_7 0x5C
189 #define MC_RIR_LIMIT_MASK ((1 << 10) - 1)
191 #define MC_RIR_WAY_CH 0x80
192 #define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7)
193 #define MC_RIR_WAY_RANK_MASK 0x7
200 #define MAX_DIMMS 3 /* Max DIMMS per channel */
201 #define MAX_MCR_FUNC 4
202 #define MAX_CHAN_FUNC 3
212 struct i7core_inject {
219 /* Error address mask */
220 int channel, dimm, rank, bank, page, col;
223 struct i7core_channel {
228 struct pci_id_descr {
235 struct pci_id_table {
236 const struct pci_id_descr *descr;
241 struct list_head list;
243 struct pci_dev **pdev;
245 struct mem_ctl_info *mci;
249 struct pci_dev *pci_noncore;
250 struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1];
251 struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];
253 struct i7core_dev *i7core_dev;
255 struct i7core_info info;
256 struct i7core_inject inject;
257 struct i7core_channel channel[NUM_CHANS];
259 int ce_count_available;
260 int csrow_map[NUM_CHANS][MAX_DIMMS];
262 /* ECC corrected errors counts per udimm */
263 unsigned long udimm_ce_count[MAX_DIMMS];
264 int udimm_last_ce_count[MAX_DIMMS];
265 /* ECC corrected errors counts per rdimm */
266 unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
267 int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];
269 bool is_registered, enable_scrub;
271 /* Fifo double buffers */
272 struct mce mce_entry[MCE_LOG_LEN];
273 struct mce mce_outentry[MCE_LOG_LEN];
275 /* Fifo in/out counters */
276 unsigned mce_in, mce_out;
278 /* Count indicator to show errors not got */
279 unsigned mce_overrun;
281 /* DCLK Frequency used for computing scrub rate */
284 /* Struct to control EDAC polling */
285 struct edac_pci_ctl_info *i7core_pci;
288 #define PCI_DESCR(device, function, device_id) \
290 .func = (function), \
291 .dev_id = (device_id)
293 static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
294 /* Memory controller */
295 { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
296 { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
297 /* Exists only for RDIMM */
298 { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
299 { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
302 { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
303 { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
304 { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
305 { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) },
308 { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
309 { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
310 { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
311 { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) },
314 { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
315 { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
316 { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
317 { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },
319 /* Generic Non-core registers */
321 * This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
322 * On Xeon 55xx, however, it has a different id (8086:2c40). So,
323 * the probing code needs to test for the other address in case of
324 * failure of this one
326 { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },
330 static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
331 { PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) },
332 { PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) },
333 { PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) },
335 { PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
336 { PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
337 { PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
338 { PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC) },
340 { PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
341 { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
342 { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
343 { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) },
346 * This is the PCI device that has an alternate address on some
347 * processors like Core i7 860
349 { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) },
352 static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
353 /* Memory controller */
354 { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) },
355 { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) },
356 /* Exists only for RDIMM */
357 { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1 },
358 { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },
361 { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
362 { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
363 { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
364 { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2) },
367 { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
368 { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
369 { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
370 { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2) },
373 { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
374 { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
375 { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
376 { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) },
378 /* Generic Non-core registers */
379 { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) },
383 #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
384 static const struct pci_id_table pci_dev_table[] = {
385 PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
386 PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
387 PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
388 {0,} /* 0 terminated list. */
392 * pci_device_id table for which devices we are looking for
394 static DEFINE_PCI_DEVICE_TABLE(i7core_pci_tbl) = {
395 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
396 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
397 {0,} /* 0 terminated list. */
400 /****************************************************************************
401 Ancillary status routines
402 ****************************************************************************/
/* MC_CONTROL bits; (ch) is parenthesized for macro hygiene. */
#define CH_ACTIVE(pvt, ch)	((pvt)->info.mc_control & (1 << (8 + (ch))))
#define ECCx8(pvt)		((pvt)->info.mc_control & (1 << 1))

/* MC_STATUS bits */
#define ECC_ENABLED(pvt)	((pvt)->info.mc_status & (1 << 4))
#define CH_DISABLED(pvt, ch)	((pvt)->info.mc_status & (1 << (ch)))
412 /* MC_MAX_DOD read functions */
413 static inline int numdimms(u32 dimms)
415 return (dimms & 0x3) + 1;
418 static inline int numrank(u32 rank)
420 static int ranks[4] = { 1, 2, 4, -EINVAL };
422 return ranks[rank & 0x3];
425 static inline int numbank(u32 bank)
427 static int banks[4] = { 4, 8, 16, -EINVAL };
429 return banks[bank & 0x3];
432 static inline int numrow(u32 row)
434 static int rows[8] = {
435 1 << 12, 1 << 13, 1 << 14, 1 << 15,
436 1 << 16, -EINVAL, -EINVAL, -EINVAL,
439 return rows[row & 0x7];
442 static inline int numcol(u32 col)
444 static int cols[8] = {
445 1 << 10, 1 << 11, 1 << 12, -EINVAL,
447 return cols[col & 0x3];
450 static struct i7core_dev *get_i7core_dev(u8 socket)
452 struct i7core_dev *i7core_dev;
454 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
455 if (i7core_dev->socket == socket)
462 static struct i7core_dev *alloc_i7core_dev(u8 socket,
463 const struct pci_id_table *table)
465 struct i7core_dev *i7core_dev;
467 i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
471 i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
473 if (!i7core_dev->pdev) {
478 i7core_dev->socket = socket;
479 i7core_dev->n_devs = table->n_devs;
480 list_add_tail(&i7core_dev->list, &i7core_edac_list);
485 static void free_i7core_dev(struct i7core_dev *i7core_dev)
487 list_del(&i7core_dev->list);
488 kfree(i7core_dev->pdev);
492 /****************************************************************************
493 Memory check routines
494 ****************************************************************************/
495 static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
498 struct i7core_dev *i7core_dev = get_i7core_dev(socket);
504 for (i = 0; i < i7core_dev->n_devs; i++) {
505 if (!i7core_dev->pdev[i])
508 if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
509 PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
510 return i7core_dev->pdev[i];
518 * i7core_get_active_channels() - gets the number of channels and csrows
519 * @socket: Quick Path Interconnect socket
520 * @channels: Number of channels that will be returned
521 * @csrows: Number of csrows found
523 * Since EDAC core needs to know in advance the number of available channels
524 * and csrows, in order to allocate memory for csrows/channels, it is needed
525 * to run two similar steps. At the first step, implemented on this function,
526 * it checks the number of csrows/channels present at one socket.
527 * this is used in order to properly allocate the size of mci components.
529 * It should be noticed that none of the current available datasheets explain
530 * or even mention how csrows are seen by the memory controller. So, we need
531 * to add a fake description for csrows.
532 * So, this driver is attributing one DIMM memory for one csrow.
534 static int i7core_get_active_channels(const u8 socket, unsigned *channels,
537 struct pci_dev *pdev = NULL;
544 pdev = get_pdev_slot_func(socket, 3, 0);
546 i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
551 /* Device 3 function 0 reads */
552 pci_read_config_dword(pdev, MC_STATUS, &status);
553 pci_read_config_dword(pdev, MC_CONTROL, &control);
555 for (i = 0; i < NUM_CHANS; i++) {
557 /* Check if the channel is active */
558 if (!(control & (1 << (8 + i))))
561 /* Check if the channel is disabled */
562 if (status & (1 << i))
565 pdev = get_pdev_slot_func(socket, i + 4, 1);
567 i7core_printk(KERN_ERR, "Couldn't find socket %d "
572 /* Devices 4-6 function 1 */
573 pci_read_config_dword(pdev,
574 MC_DOD_CH_DIMM0, &dimm_dod[0]);
575 pci_read_config_dword(pdev,
576 MC_DOD_CH_DIMM1, &dimm_dod[1]);
577 pci_read_config_dword(pdev,
578 MC_DOD_CH_DIMM2, &dimm_dod[2]);
582 for (j = 0; j < 3; j++) {
583 if (!DIMM_PRESENT(dimm_dod[j]))
589 debugf0("Number of active channels on socket %d: %d\n",
595 static int get_dimm_config(struct mem_ctl_info *mci)
597 struct i7core_pvt *pvt = mci->pvt_info;
598 struct csrow_info *csr;
599 struct pci_dev *pdev;
602 unsigned long last_page = 0;
605 struct dimm_info *dimm;
607 /* Get data from the MC register, function 0 */
608 pdev = pvt->pci_mcr[0];
612 /* Device 3 function 0 reads */
613 pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
614 pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
615 pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
616 pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
618 debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
619 pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
620 pvt->info.max_dod, pvt->info.ch_map);
622 if (ECC_ENABLED(pvt)) {
623 debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
625 mode = EDAC_S8ECD8ED;
627 mode = EDAC_S4ECD4ED;
629 debugf0("ECC disabled\n");
633 /* FIXME: need to handle the error codes */
634 debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
636 numdimms(pvt->info.max_dod),
637 numrank(pvt->info.max_dod >> 2),
638 numbank(pvt->info.max_dod >> 4),
639 numrow(pvt->info.max_dod >> 6),
640 numcol(pvt->info.max_dod >> 9));
642 for (i = 0; i < NUM_CHANS; i++) {
643 u32 data, dimm_dod[3], value[8];
645 if (!pvt->pci_ch[i][0])
648 if (!CH_ACTIVE(pvt, i)) {
649 debugf0("Channel %i is not active\n", i);
652 if (CH_DISABLED(pvt, i)) {
653 debugf0("Channel %i is disabled\n", i);
657 /* Devices 4-6 function 0 */
658 pci_read_config_dword(pvt->pci_ch[i][0],
659 MC_CHANNEL_DIMM_INIT_PARAMS, &data);
661 pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
664 if (data & REGISTERED_DIMM)
669 if (data & THREE_DIMMS_PRESENT)
670 pvt->channel[i].dimms = 3;
671 else if (data & SINGLE_QUAD_RANK_PRESENT)
672 pvt->channel[i].dimms = 1;
674 pvt->channel[i].dimms = 2;
677 /* Devices 4-6 function 1 */
678 pci_read_config_dword(pvt->pci_ch[i][1],
679 MC_DOD_CH_DIMM0, &dimm_dod[0]);
680 pci_read_config_dword(pvt->pci_ch[i][1],
681 MC_DOD_CH_DIMM1, &dimm_dod[1]);
682 pci_read_config_dword(pvt->pci_ch[i][1],
683 MC_DOD_CH_DIMM2, &dimm_dod[2]);
685 debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
686 "%d ranks, %cDIMMs\n",
688 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
690 pvt->channel[i].ranks,
691 (data & REGISTERED_DIMM) ? 'R' : 'U');
693 for (j = 0; j < 3; j++) {
694 u32 banks, ranks, rows, cols;
697 if (!DIMM_PRESENT(dimm_dod[j]))
700 banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
701 ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
702 rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
703 cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
705 /* DDR3 has 8 I/O banks */
706 size = (rows * cols * banks * ranks) >> (20 - 3);
708 pvt->channel[i].dimms++;
710 debugf0("\tdimm %d %d Mb offset: %x, "
711 "bank: %d, rank: %d, row: %#x, col: %#x\n",
713 RANKOFFSET(dimm_dod[j]),
714 banks, ranks, rows, cols);
716 npages = MiB_TO_PAGES(size);
718 csr = &mci->csrows[csrow];
719 csr->first_page = last_page + 1;
721 csr->last_page = last_page;
722 csr->nr_pages = npages;
725 csr->csrow_idx = csrow;
726 csr->nr_channels = 1;
728 csr->channels[0].chan_idx = i;
729 csr->channels[0].ce_count = 0;
731 pvt->csrow_map[i][j] = csrow;
733 dimm = csr->channels[0].dimm;
736 dimm->dtype = DEV_X4;
739 dimm->dtype = DEV_X8;
742 dimm->dtype = DEV_X16;
745 dimm->dtype = DEV_UNKNOWN;
748 snprintf(dimm->label, sizeof(dimm->label),
749 "CPU#%uChannel#%u_DIMM#%u",
750 pvt->i7core_dev->socket, i, j);
752 dimm->edac_mode = mode;
756 pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
757 pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
758 pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
759 pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
760 pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
761 pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
762 pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
763 pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
764 debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
765 for (j = 0; j < 8; j++)
766 debugf1("\t\t%#x\t%#x\t%#x\n",
767 (value[j] >> 27) & 0x1,
768 (value[j] >> 24) & 0x7,
769 (value[j] & ((1 << 24) - 1)));
775 /****************************************************************************
776 Error insertion routines
777 ****************************************************************************/
779 /* The i7core has independent error injection features per channel.
780 However, to have a simpler code, we don't allow enabling error injection
781 on more than one channel.
782 Also, since a change at an inject parameter will be applied only at enable,
783 we're disabling error injection on all write calls to the sysfs nodes that
784 controls the error code injection.
786 static int disable_inject(const struct mem_ctl_info *mci)
788 struct i7core_pvt *pvt = mci->pvt_info;
790 pvt->inject.enable = 0;
792 if (!pvt->pci_ch[pvt->inject.channel][0])
795 pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
796 MC_CHANNEL_ERROR_INJECT, 0);
802 * i7core inject inject.section
804 * accept and store error injection inject.section value
805 * bit 0 - refers to the lower 32-byte half cacheline
806 * bit 1 - refers to the upper 32-byte half cacheline
808 static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
809 const char *data, size_t count)
811 struct i7core_pvt *pvt = mci->pvt_info;
815 if (pvt->inject.enable)
818 rc = strict_strtoul(data, 10, &value);
819 if ((rc < 0) || (value > 3))
822 pvt->inject.section = (u32) value;
826 static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
829 struct i7core_pvt *pvt = mci->pvt_info;
830 return sprintf(data, "0x%08x\n", pvt->inject.section);
836 * accept and store error injection inject.section value
837 * bit 0 - repeat enable - Enable error repetition
838 * bit 1 - inject ECC error
839 * bit 2 - inject parity error
841 static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
842 const char *data, size_t count)
844 struct i7core_pvt *pvt = mci->pvt_info;
848 if (pvt->inject.enable)
851 rc = strict_strtoul(data, 10, &value);
852 if ((rc < 0) || (value > 7))
855 pvt->inject.type = (u32) value;
859 static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
862 struct i7core_pvt *pvt = mci->pvt_info;
863 return sprintf(data, "0x%08x\n", pvt->inject.type);
867 * i7core_inject_inject.eccmask_store
869 * The type of error (UE/CE) will depend on the inject.eccmask value:
870 * Any bits set to a 1 will flip the corresponding ECC bit
871 * Correctable errors can be injected by flipping 1 bit or the bits within
872 * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
873 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
874 * uncorrectable error to be injected.
876 static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
877 const char *data, size_t count)
879 struct i7core_pvt *pvt = mci->pvt_info;
883 if (pvt->inject.enable)
886 rc = strict_strtoul(data, 10, &value);
890 pvt->inject.eccmask = (u32) value;
894 static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
897 struct i7core_pvt *pvt = mci->pvt_info;
898 return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
904 * The type of error (UE/CE) will depend on the inject.eccmask value:
905 * Any bits set to a 1 will flip the corresponding ECC bit
906 * Correctable errors can be injected by flipping 1 bit or the bits within
907 * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
908 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
909 * uncorrectable error to be injected.
912 #define DECLARE_ADDR_MATCH(param, limit) \
913 static ssize_t i7core_inject_store_##param( \
914 struct mem_ctl_info *mci, \
915 const char *data, size_t count) \
917 struct i7core_pvt *pvt; \
921 debugf1("%s()\n", __func__); \
922 pvt = mci->pvt_info; \
924 if (pvt->inject.enable) \
925 disable_inject(mci); \
927 if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
930 rc = strict_strtoul(data, 10, &value); \
931 if ((rc < 0) || (value >= limit)) \
935 pvt->inject.param = value; \
940 static ssize_t i7core_inject_show_##param( \
941 struct mem_ctl_info *mci, \
944 struct i7core_pvt *pvt; \
946 pvt = mci->pvt_info; \
947 debugf1("%s() pvt=%p\n", __func__, pvt); \
948 if (pvt->inject.param < 0) \
949 return sprintf(data, "any\n"); \
951 return sprintf(data, "%d\n", pvt->inject.param);\
954 #define ATTR_ADDR_MATCH(param) \
958 .mode = (S_IRUGO | S_IWUSR) \
960 .show = i7core_inject_show_##param, \
961 .store = i7core_inject_store_##param, \
964 DECLARE_ADDR_MATCH(channel, 3);
965 DECLARE_ADDR_MATCH(dimm, 3);
966 DECLARE_ADDR_MATCH(rank, 4);
967 DECLARE_ADDR_MATCH(bank, 32);
968 DECLARE_ADDR_MATCH(page, 0x10000);
969 DECLARE_ADDR_MATCH(col, 0x4000);
971 static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
976 debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
977 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
980 for (count = 0; count < 10; count++) {
983 pci_write_config_dword(dev, where, val);
984 pci_read_config_dword(dev, where, &read);
990 i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
991 "write=%08x. Read=%08x\n",
992 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
999 * This routine prepares the Memory Controller for error injection.
1000 * The error will be injected when some process tries to write to the
1001 * memory that matches the given criteria.
1002 * The criteria can be set in terms of a mask where dimm, rank, bank, page
1003 * and col can be specified.
1004 * A -1 value for any of the mask items will make the MCU to ignore
1005 * that matching criteria for error injection.
1007 * It should be noticed that the error will only happen after a write operation
1008 * on a memory that matches the condition. if REPEAT_EN is not enabled at
1009 * inject mask, then it will produce just one error. Otherwise, it will repeat
1010 * until the injectmask would be cleaned.
1012 * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD
1013 * is reliable enough to check if the MC is using the
1014 * three channels. However, this is not clear at the datasheet.
1016 static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
1017 const char *data, size_t count)
1019 struct i7core_pvt *pvt = mci->pvt_info;
1025 if (!pvt->pci_ch[pvt->inject.channel][0])
1028 rc = strict_strtoul(data, 10, &enable);
1033 pvt->inject.enable = 1;
1035 disable_inject(mci);
1039 /* Sets pvt->inject.dimm mask */
1040 if (pvt->inject.dimm < 0)
1043 if (pvt->channel[pvt->inject.channel].dimms > 2)
1044 mask |= (pvt->inject.dimm & 0x3LL) << 35;
1046 mask |= (pvt->inject.dimm & 0x1LL) << 36;
1049 /* Sets pvt->inject.rank mask */
1050 if (pvt->inject.rank < 0)
1053 if (pvt->channel[pvt->inject.channel].dimms > 2)
1054 mask |= (pvt->inject.rank & 0x1LL) << 34;
1056 mask |= (pvt->inject.rank & 0x3LL) << 34;
1059 /* Sets pvt->inject.bank mask */
1060 if (pvt->inject.bank < 0)
1063 mask |= (pvt->inject.bank & 0x15LL) << 30;
1065 /* Sets pvt->inject.page mask */
1066 if (pvt->inject.page < 0)
1069 mask |= (pvt->inject.page & 0xffff) << 14;
1071 /* Sets pvt->inject.column mask */
1072 if (pvt->inject.col < 0)
1075 mask |= (pvt->inject.col & 0x3fff);
1079 * bits 1-2: MASK_HALF_CACHELINE
1081 * bit 4: INJECT_ADDR_PARITY
1084 injectmask = (pvt->inject.type & 1) |
1085 (pvt->inject.section & 0x3) << 1 |
1086 (pvt->inject.type & 0x6) << (3 - 1);
1088 /* Unlock writes to registers - this register is write only */
1089 pci_write_config_dword(pvt->pci_noncore,
1090 MC_CFG_CONTROL, 0x2);
1092 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1093 MC_CHANNEL_ADDR_MATCH, mask);
1094 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1095 MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);
1097 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1098 MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);
1100 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1101 MC_CHANNEL_ERROR_INJECT, injectmask);
1104 * This is something undocumented, based on my tests
1105 * Without writing 8 to this register, errors aren't injected. Not sure
1108 pci_write_config_dword(pvt->pci_noncore,
1111 debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
1113 mask, pvt->inject.eccmask, injectmask);
1119 static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
1122 struct i7core_pvt *pvt = mci->pvt_info;
1125 if (!pvt->pci_ch[pvt->inject.channel][0])
1128 pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
1129 MC_CHANNEL_ERROR_INJECT, &injectmask);
1131 debugf0("Inject error read: 0x%018x\n", injectmask);
1133 if (injectmask & 0x0c)
1134 pvt->inject.enable = 1;
1136 return sprintf(data, "%d\n", pvt->inject.enable);
1139 #define DECLARE_COUNTER(param) \
1140 static ssize_t i7core_show_counter_##param( \
1141 struct mem_ctl_info *mci, \
1144 struct i7core_pvt *pvt = mci->pvt_info; \
1146 debugf1("%s() \n", __func__); \
1147 if (!pvt->ce_count_available || (pvt->is_registered)) \
1148 return sprintf(data, "data unavailable\n"); \
1149 return sprintf(data, "%lu\n", \
1150 pvt->udimm_ce_count[param]); \
1153 #define ATTR_COUNTER(param) \
1156 .name = __stringify(udimm##param), \
1157 .mode = (S_IRUGO | S_IWUSR) \
1159 .show = i7core_show_counter_##param \
1170 static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
1171 ATTR_ADDR_MATCH(channel),
1172 ATTR_ADDR_MATCH(dimm),
1173 ATTR_ADDR_MATCH(rank),
1174 ATTR_ADDR_MATCH(bank),
1175 ATTR_ADDR_MATCH(page),
1176 ATTR_ADDR_MATCH(col),
1177 { } /* End of list */
1180 static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
1181 .name = "inject_addrmatch",
1182 .mcidev_attr = i7core_addrmatch_attrs,
1185 static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
1189 { .attr = { .name = NULL } }
1192 static const struct mcidev_sysfs_group i7core_udimm_counters = {
1193 .name = "all_channel_counts",
1194 .mcidev_attr = i7core_udimm_counters_attrs,
1197 static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
1200 .name = "inject_section",
1201 .mode = (S_IRUGO | S_IWUSR)
1203 .show = i7core_inject_section_show,
1204 .store = i7core_inject_section_store,
1207 .name = "inject_type",
1208 .mode = (S_IRUGO | S_IWUSR)
1210 .show = i7core_inject_type_show,
1211 .store = i7core_inject_type_store,
1214 .name = "inject_eccmask",
1215 .mode = (S_IRUGO | S_IWUSR)
1217 .show = i7core_inject_eccmask_show,
1218 .store = i7core_inject_eccmask_store,
1220 .grp = &i7core_inject_addrmatch,
1223 .name = "inject_enable",
1224 .mode = (S_IRUGO | S_IWUSR)
1226 .show = i7core_inject_enable_show,
1227 .store = i7core_inject_enable_store,
1229 { } /* End of list */
1232 static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
1235 .name = "inject_section",
1236 .mode = (S_IRUGO | S_IWUSR)
1238 .show = i7core_inject_section_show,
1239 .store = i7core_inject_section_store,
1242 .name = "inject_type",
1243 .mode = (S_IRUGO | S_IWUSR)
1245 .show = i7core_inject_type_show,
1246 .store = i7core_inject_type_store,
1249 .name = "inject_eccmask",
1250 .mode = (S_IRUGO | S_IWUSR)
1252 .show = i7core_inject_eccmask_show,
1253 .store = i7core_inject_eccmask_store,
1255 .grp = &i7core_inject_addrmatch,
1258 .name = "inject_enable",
1259 .mode = (S_IRUGO | S_IWUSR)
1261 .show = i7core_inject_enable_show,
1262 .store = i7core_inject_enable_store,
1264 .grp = &i7core_udimm_counters,
1266 { } /* End of list */
1269 /****************************************************************************
1270 Device initialization routines: put/get, init/exit
1271 ****************************************************************************/
 *	i7core_put_devices	'put' all the devices that we have
 *				reserved via 'get' for one socket
/*
 * Drop the PCI reference taken by i7core_get_onedevice() on each device
 * in this socket's pdev[] array.
 * NOTE(review): the pci_dev_put() call and the NULL-pdev skip are outside
 * the visible chunk — confirm against the full source.
 */
static void i7core_put_devices(struct i7core_dev *i7core_dev)
	debugf0(__FILE__ ": %s()\n", __func__);
	for (i = 0; i < i7core_dev->n_devs; i++) {
		struct pci_dev *pdev = i7core_dev->pdev[i];
		debugf0("Removing dev %02x:%02x.%d\n",
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
/* Put and free every per-socket device descriptor on the global list. */
static void i7core_put_all_devices(void)
	struct i7core_dev *i7core_dev, *tmp;
	/* _safe iterator: free_i7core_dev() removes the entry from the list */
	list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
		i7core_put_devices(i7core_dev);
		free_i7core_dev(i7core_dev);
/*
 * If the first device of the PCI ID table is not visible, force a legacy
 * scan of the hidden buses (255, 254, ... one per possible socket) so the
 * non-core devices show up.  Only used when use_pci_fixup is set.
 * NOTE(review): the pci_dev_put() balancing the pci_get_device() probe is
 * outside the visible chunk — verify no reference is leaked.
 */
static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
	struct pci_dev *pdev = NULL;
	/*
	 * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core pci buses
	 * aren't announced by acpi. So, we need to use a legacy scan probing
	 */
	while (table && table->descr) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
		if (unlikely(!pdev)) {
			for (i = 0; i < MAX_SOCKET_BUSES; i++)
				pcibios_scan_specific_bus(255-i);
/*
 * Return the highest PCI bus number known to the PCI core; used later to
 * derive the socket number from a device's bus (socket = last_bus - bus).
 * NOTE(review): the bus-number read and max update inside the loop are
 * elided from this chunk.
 */
static unsigned i7core_pci_lastbus(void)
	int last_bus = 0, bus;
	struct pci_bus *b = NULL;
	while ((b = pci_find_next_bus(b)) != NULL) {
		debugf0("Found bus %d\n", bus);
	debugf0("Last bus %d\n", last_bus);
1342 * i7core_get_all_devices Find and perform 'get' operation on the MCH's
1343 * device/functions we want to reference for this driver
1345 * Need to 'get' device 16 func 1 and func 2
/*
 * Probe and 'get' one PCI device (entry @devno of @table), associate it
 * with the right per-socket i7core_dev, and sanity-check its slot/func.
 * @prev carries the previous match so repeated calls walk all instances.
 * Returns 0 on success; error paths (elided here) put/free as needed.
 */
static int i7core_get_onedevice(struct pci_dev **prev,
				const struct pci_id_table *table,
				const unsigned devno,
				const unsigned last_bus)
	struct i7core_dev *i7core_dev;
	const struct pci_id_descr *dev_descr = &table->descr[devno];
	struct pci_dev *pdev = NULL;
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);
	/*
	 * On Xeon 55xx, the Intel Quickpath Arch Generic Non-core regs
	 * is at addr 8086:2c40, instead of 8086:2c41. So, we need
	 * to probe for the alternate address in case of failure
	 */
	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
	/* Lynnfield has the same primary/alternate ID quirk */
	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
	/* Missing optional devices are tolerated; mandatory ones are not */
	if (dev_descr->optional)
	i7core_printk(KERN_INFO,
		"Device not found: dev %02x.%d PCI ID %04x:%04x\n",
		dev_descr->dev, dev_descr->func,
		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
	/* End of list, leave */
	bus = pdev->bus->number;
	/* Hidden buses count down from last_bus, one per socket */
	socket = last_bus - bus;
	i7core_dev = get_i7core_dev(socket);
	/* First device seen for this socket: allocate its descriptor */
	i7core_dev = alloc_i7core_dev(socket, table);
	if (i7core_dev->pdev[devno]) {
		i7core_printk(KERN_ERR,
			"Duplicated device for "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
	i7core_dev->pdev[devno] = pdev;
	/* Sanity check: PCI slot/function must match the table entry */
	if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
		     PCI_FUNC(pdev->devfn) != dev_descr->func)) {
		i7core_printk(KERN_ERR,
			"Device PCI ID %04x:%04x "
			"has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
			bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			bus, dev_descr->dev, dev_descr->func);
	/* Be sure that the device is enabled */
	if (unlikely(pci_enable_device(pdev) < 0)) {
		i7core_printk(KERN_ERR,
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
	debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
		socket, bus, dev_descr->dev,
		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
	/*
	 * As stated on drivers/pci/search.c, the reference count for
	 * @from is always decremented if it is not %NULL. So, as we need
	 * to get all devices up to null, we need to do a get for the device
	 */
/*
 * Walk every PCI ID table and 'get' all MC devices for every socket.
 * On any failure everything already acquired is released.
 */
static int i7core_get_all_devices(void)
	int i, rc, last_bus;
	struct pci_dev *pdev = NULL;
	const struct pci_id_table *table = pci_dev_table;
	last_bus = i7core_pci_lastbus();
	while (table && table->descr) {
		for (i = 0; i < table->n_devs; i++) {
			rc = i7core_get_onedevice(&pdev, table, i,
			/* Error: roll back every reference taken so far */
			i7core_put_all_devices();
/*
 * Cache this socket's PCI devices inside the mci private data for fast
 * access: slot 3 funcs -> pci_mcr[], slots 4..4+NUM_CHANS -> pci_ch[][],
 * slot 0 func 0 -> pci_noncore.  Also detects the processor family (to
 * decide whether scrub-rate control is supported) and whether RDIMMs are
 * in use (slot 3 func 2 present => registered memory).
 * NOTE(review): the case 'break;' statements and several closing braces
 * are elided from this chunk.
 */
static int mci_bind_devs(struct mem_ctl_info *mci,
			 struct i7core_dev *i7core_dev)
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	pvt->is_registered = false;
	pvt->enable_scrub = false;
	for (i = 0; i < i7core_dev->n_devs; i++) {
		pdev = i7core_dev->pdev[i];
		func = PCI_FUNC(pdev->devfn);
		slot = PCI_SLOT(pdev->devfn);
		if (unlikely(func > MAX_MCR_FUNC))
		pvt->pci_mcr[func] = pdev;
		} else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
			if (unlikely(func > MAX_CHAN_FUNC))
			pvt->pci_ch[slot - 4][func] = pdev;
		} else if (!slot && !func) {
			pvt->pci_noncore = pdev;
			/* Detect the processor family */
			switch (pdev->device) {
			case PCI_DEVICE_ID_INTEL_I7_NONCORE:
				family = "Xeon 35xx/ i7core";
				pvt->enable_scrub = false;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT:
				family = "i7-800/i5-700";
				pvt->enable_scrub = false;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE:
				family = "Xeon 34xx";
				pvt->enable_scrub = false;
			case PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT:
				family = "Xeon 55xx";
				pvt->enable_scrub = true;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2:
				family = "Xeon 56xx / i7-900";
				pvt->enable_scrub = true;
				/* default: unknown family, no scrub control */
				pvt->enable_scrub = false;
			debugf0("Detected a processor type %s\n", family);
		debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			pdev, i7core_dev->socket);
		/* Dev 3 func 2 only exists with registered (RDIMM) memory */
		if (PCI_SLOT(pdev->devfn) == 3 &&
		    PCI_FUNC(pdev->devfn) == 2)
			pvt->is_registered = true;
	i7core_printk(KERN_ERR, "Device %d, function %d "
		      "is out of the expected range\n",
1566 /****************************************************************************
1567 Error check routines
1568 ****************************************************************************/
/*
 * Report @add corrected errors against the csrow mapped to (chan, dimm),
 * one edac_mc_handle_fbd_ce() call per error.
 * NOTE(review): kfree(msg) is not visible in this chunk — confirm the
 * kasprintf() buffer is released in the full source.
 */
static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
	struct i7core_pvt *pvt = mci->pvt_info;
	int row = pvt->csrow_map[chan][dimm], i;
	for (i = 0; i < add; i++) {
		msg = kasprintf(GFP_KERNEL, "Corrected error "
				"(Socket=%d channel=%d dimm=%d)",
				pvt->i7core_dev->socket, chan, dimm);
		edac_mc_handle_fbd_ce(mci, row, 0, msg);
/*
 * Update the per-channel RDIMM corrected-error counters from the newly
 * read hardware values (new0..new2), compute the deltas since the last
 * poll, and push each delta into the EDAC core.
 * NOTE(review): the wrap-around compensation for counters that rolled
 * over between polls is elided from this chunk.
 */
static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
	struct i7core_pvt *pvt = mci->pvt_info;
	int add0 = 0, add1 = 0, add2 = 0;
	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/* Updates CE counters */
		add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
		add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
		add0 = new0 - pvt->rdimm_last_ce_count[chan][0];
		pvt->rdimm_ce_count[chan][2] += add2;
		pvt->rdimm_ce_count[chan][1] += add1;
		pvt->rdimm_ce_count[chan][0] += add0;
	/* First poll only establishes the baseline */
	pvt->ce_count_available = 1;
	/* Store the new values */
	pvt->rdimm_last_ce_count[chan][2] = new2;
	pvt->rdimm_last_ce_count[chan][1] = new1;
	pvt->rdimm_last_ce_count[chan][0] = new0;
	/*updated the edac core */
	i7core_rdimm_update_csrow(mci, chan, 0, add0);
	i7core_rdimm_update_csrow(mci, chan, 1, add1);
	i7core_rdimm_update_csrow(mci, chan, 2, add2);
/*
 * Poll the six MC_COR_ECC_CNT registers (dev 3 func 2) and fold the raw
 * per-DIMM counts into per-channel totals.  With 3 DIMMs on a channel the
 * counters map one per DIMM; with 1-2 DIMMs top/bottom halves are summed.
 */
static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
	struct i7core_pvt *pvt = mci->pvt_info;
	int i, new0, new1, new2;
	/*Read DEV 3: FUN 2: MC_COR_ECC_CNT regs directly*/
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
	for (i = 0 ; i < 3; i++) {
		debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
			(i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
		/*if the channel has 3 dimms*/
		if (pvt->channel[i].dimms > 2) {
			new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
			new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
			/* 1-2 DIMMs: sum top+bottom halves per DIMM */
			new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
				DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
				DIMM_BOT_COR_ERR(rcv[i][1]);
		i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
1672 /* This function is based on the device 3 function 4 registers as described on:
1673 * Intel Xeon Processor 5500 Series Datasheet Volume 2
1674 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
1675 * also available at:
1676 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
/*
 * Poll the uDIMM corrected-error counters (dev 3 func 4, MC_TEST_ERR
 * registers), compute deltas since the previous poll, accumulate them and
 * log newly seen corrected errors.
 * NOTE(review): wrap-around compensation for the deltas is elided here.
 */
static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
	struct i7core_pvt *pvt = mci->pvt_info;
	int new0, new1, new2;
	/* Function 4 is absent on some configurations: nothing to poll */
	if (!pvt->pci_mcr[4]) {
		debugf0("%s MCR registers not found\n", __func__);
	/* Corrected test errors */
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);
	/* Store the new values */
	new2 = DIMM2_COR_ERR(rcv1);
	new1 = DIMM1_COR_ERR(rcv0);
	new0 = DIMM0_COR_ERR(rcv0);
	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/* Updates CE counters */
		int add0, add1, add2;
		add2 = new2 - pvt->udimm_last_ce_count[2];
		add1 = new1 - pvt->udimm_last_ce_count[1];
		add0 = new0 - pvt->udimm_last_ce_count[0];
		pvt->udimm_ce_count[2] += add2;
		pvt->udimm_ce_count[1] += add1;
		pvt->udimm_ce_count[0] += add0;
		/* Only log when at least one counter moved */
		if (add0 | add1 | add2)
			i7core_printk(KERN_ERR, "New Corrected error(s): "
				      "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
	pvt->ce_count_available = 1;
	/* Store the new values */
	pvt->udimm_last_ce_count[2] = new2;
	pvt->udimm_last_ce_count[1] = new1;
	pvt->udimm_last_ce_count[0] = new0;
1733 * According with tables E-11 and E-12 of chapter E.3.3 of Intel 64 and IA-32
1734 * Architectures Software Developer’s Manual Volume 3B.
1735 * Nehalem are defined as family 0x06, model 0x1a
1737 * The MCA registers used here are the following ones:
1738 * struct mce field MCA Register
1739 * m->status MSR_IA32_MC8_STATUS
1740 * m->addr MSR_IA32_MC8_ADDR
1741 * m->misc MSR_IA32_MC8_MISC
1742 * In the case of Nehalem, the error information is masked at .status and .misc
/*
 * Decode one MCE record (bank 8 / memory controller) into a human-readable
 * message and hand it to the EDAC core: uncorrected errors (mcgstatus bit
 * 0) via edac_mc_handle_fbd_ue(), corrected ones via _fbd_ce() unless
 * RDIMM counters already account for them (pvt->is_registered).
 * NOTE(review): the switch 'break;'s, the error-string switch header and
 * the kfree(msg) are elided from this chunk.
 */
static void i7core_mce_output_error(struct mem_ctl_info *mci,
				    const struct mce *m)
	struct i7core_pvt *pvt = mci->pvt_info;
	char *type, *optype, *err, *msg;
	/* Error-category bits live in status[24:16] */
	unsigned long error = m->status & 0x1ff0000l;
	u32 optypenum = (m->status >> 4) & 0x07;
	u32 core_err_cnt = (m->status >> 38) & 0x7fff;
	u32 dimm = (m->misc >> 16) & 0x3;
	u32 channel = (m->misc >> 18) & 0x3;
	u32 syndrome = m->misc >> 32;
	u32 errnum = find_first_bit(&error, 32);
	/* mcgstatus bit 0 distinguishes fatal (UE) from corrected (CE) */
	if (m->mcgstatus & 1)
	switch (optypenum) {
		optype = "generic undef request";
		optype = "read error";
		optype = "write error";
		optype = "addr/cmd error";
		optype = "scrubbing error";
		optype = "reserved";
	/* Map the error bit number to a description string */
	err = "read ECC error";
	err = "RAS ECC error";
	err = "write parity error";
	err = "redundacy loss";
	err = "memory range error";
	err = "RTID out of range";
	err = "address parity error";
	err = "byte enable parity error";
	/* FIXME: should convert addr into bank and rank information */
	msg = kasprintf(GFP_ATOMIC,
		"%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
		"syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
		type, (long long) m->addr, m->cpu, dimm, channel,
		syndrome, core_err_cnt, (long long)m->status,
		(long long)m->misc, optype, err);
	csrow = pvt->csrow_map[channel][dimm];
	/* Call the helper to output message */
	if (m->mcgstatus & 1)
		edac_mc_handle_fbd_ue(mci, csrow, 0,
				      0 /* FIXME: should be channel here */, msg);
	else if (!pvt->is_registered)
		edac_mc_handle_fbd_ce(mci, csrow,
				      0 /* FIXME: should be channel here */, msg);
1841 * i7core_check_error Retrieve and process errors reported by the
1842 * hardware. Called by the Core module.
/*
 * Polled error handler (mci->edac_check).  Drains the MCE ring buffer
 * filled at NMI time by i7core_mce_check_error() into a private copy
 * (double buffering), decodes each entry, then refreshes the corrected
 * error counters (uDIMM or RDIMM path depending on memory type).
 */
static void i7core_check_error(struct mem_ctl_info *mci)
	struct i7core_pvt *pvt = mci->pvt_info;
	/*
	 * MCE first step: Copy all mce errors into a temporary buffer
	 * We use a double buffering here, to reduce the risk of
	 */
	count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
	/* Nothing queued: go straight to the counter refresh */
	goto check_ce_error;
	m = pvt->mce_outentry;
	/* Ring wraps: copy the tail segment first, then the head */
	if (pvt->mce_in + count > MCE_LOG_LEN) {
		unsigned l = MCE_LOG_LEN - pvt->mce_in;
		memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
	memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
	pvt->mce_in += count;
	if (pvt->mce_overrun) {
		i7core_printk(KERN_ERR, "Lost %d memory errors\n",
		pvt->mce_overrun = 0;
	/*
	 * MCE second step: parse errors and display
	 */
	for (i = 0; i < count; i++)
		i7core_mce_output_error(mci, &pvt->mce_outentry[i]);
	/*
	 * Now, let's increment CE error counts
	 */
	if (!pvt->is_registered)
		i7core_udimm_check_mc_ecc_err(mci);
		i7core_rdimm_check_mc_ecc_err(mci);
1901 * i7core_mce_check_error Replicates mcelog routine to get errors
1902 * This routine simply queues mcelog errors, and
1903 * return. The error itself should be handled later
1904 * by i7core_check_error.
1905 * WARNING: As this routine should be called at NMI time, extra care should
1906 * be taken to avoid deadlocks, and to be as fast as possible.
/*
 * MCE decode-chain notifier callback.  Runs at NMI time: it only filters
 * and queues the record into the per-socket ring buffer; the actual
 * decoding is deferred to i7core_check_error() (poll context), except for
 * fatal errors which are handled immediately.
 * NOTE(review): ring-full (mce_overrun) handling and the NOTIFY_* return
 * statements are elided from this chunk.
 */
static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
	struct mce *mce = (struct mce *)data;
	struct i7core_dev *i7_dev;
	struct mem_ctl_info *mci;
	struct i7core_pvt *pvt;
	i7_dev = get_i7core_dev(mce->socketid);
	pvt = mci->pvt_info;
	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller
	 */
	if (((mce->status & 0xffff) >> 7) != 1)
	/* Bank 8 registers are the only ones that we know how to handle */
	/* Only handle if it is the right mc controller */
	if (mce->socketid != pvt->i7core_dev->socket)
	/* Ring buffer full: the producer would catch the consumer */
	if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
	/* Copy memory error at the ringbuffer */
	memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
	pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
	/* Handle fatal errors immediately */
	if (mce->mcgstatus & 1)
		i7core_check_error(mci);
	/* Advise mcelog that the errors were handled */
/* Notifier hooked into the x86 MCE decode chain (registered at MC init) */
static struct notifier_block i7_mce_dec = {
	.notifier_call = i7core_mce_check_error,
/*
 * Layout of an SMBIOS type-17 (Memory Device) record; packed so field
 * offsets match the DMI table exactly.
 * NOTE(review): most fields (type, length, size, speed, ...) are elided
 * from this visible chunk.
 */
struct memdev_dmi_entry {
	u16 phys_mem_array_handle;
	u16 mem_err_info_handle;
	u16 conf_mem_clk_speed;
} __attribute__((__packed__));
1991 * Decode the DRAM Clock Frequency, be paranoid, make sure that all
1992 * memory devices show the same speed, and if they don't then consider
1993 * all speeds to be invalid.
/*
 * dmi_walk() callback.  Inspects every DMI Memory Device record and
 * accumulates the DRAM clock speed into *_dclk_freq: 0 = not yet found,
 * -1 = invalid/mismatched, >0 = the common speed of all populated DIMMs.
 */
static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq)
	int *dclk_freq = _dclk_freq;
	u16 dmi_mem_clk_speed;
	/* Already marked invalid by an earlier record: nothing more to do */
	if (*dclk_freq == -1)
	if (dh->type == DMI_ENTRY_MEM_DEVICE) {
		struct memdev_dmi_entry *memdev_dmi_entry =
			(struct memdev_dmi_entry *)dh;
		/* Field offsets relative to the record start, used to make
		 * sure the BIOS-provided record is long enough to contain
		 * the field before reading it. */
		unsigned long conf_mem_clk_speed_offset =
			(unsigned long)&memdev_dmi_entry->conf_mem_clk_speed -
			(unsigned long)&memdev_dmi_entry->type;
		unsigned long speed_offset =
			(unsigned long)&memdev_dmi_entry->speed -
			(unsigned long)&memdev_dmi_entry->type;
		/* Check that a DIMM is present */
		if (memdev_dmi_entry->size == 0)
		/*
		 * Pick the configured speed if it's available, otherwise
		 * pick the DIMM speed, or we don't have a speed.
		 */
		if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) {
				memdev_dmi_entry->conf_mem_clk_speed;
		} else if (memdev_dmi_entry->length > speed_offset) {
			dmi_mem_clk_speed = memdev_dmi_entry->speed;
		if (*dclk_freq == 0) {
			/* First pass, speed was 0 */
			if (dmi_mem_clk_speed > 0) {
				/* Set speed if a valid speed is read */
				*dclk_freq = dmi_mem_clk_speed;
				/* Otherwise we don't have a valid speed */
		} else if (*dclk_freq > 0 &&
			   *dclk_freq != dmi_mem_clk_speed) {
			/*
			 * If we have a speed, check that all DIMMS are the same
			 * speed, otherwise set the speed as invalid.
			 */
2052 * The default DCLK frequency is used as a fallback if we
2053 * fail to find anything reliable in the DMI. The value
2054 * is taken straight from the datasheet.
2056 #define DEFAULT_DCLK_FREQ 800
/*
 * Return the DRAM clock frequency in MHz, preferring the value found in
 * DMI; falls back to DEFAULT_DCLK_FREQ when DMI is unusable.
 * NOTE(review): the dclk_freq local and its validity check are elided.
 */
static int get_dclk_freq(void)
	dmi_walk(decode_dclk, (void *)&dclk_freq);
	return DEFAULT_DCLK_FREQ;
2071 * set_sdram_scrub_rate This routine sets byte/sec bandwidth scrub rate
2072 * to hardware according to SCRUBINTERVAL formula
2073 * found in datasheet.
/*
 * mci->set_sdram_scrub_rate hook: program the patrol scrub engine so that
 * its bandwidth approximates @new_bw bytes/sec, using the SCRUBINTERVAL
 * formula from the datasheet (interval = dclk_MHz * 64 * 1e6 / bandwidth).
 * new_bw == 0 disables scrubbing entirely.
 */
static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	/* Get data from the MC register, function 2 */
	pdev = pvt->pci_mcr[2];
	pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub);
	/* Prepare to disable patrol scrub */
	dw_scrub &= ~STARTSCRUB;
	/* Stop the patrol scrub engine */
	write_and_test(pdev, MC_SCRUB_CONTROL,
		       dw_scrub & ~SCRUBINTERVAL_MASK);
	/* Get current status of scrub rate and set bit to disable */
	pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
	dw_ssr &= ~SSR_MODE_MASK;
	dw_ssr |= SSR_MODE_DISABLE;
	const int cache_line_size = 64;
	const u32 freq_dclk_mhz = pvt->dclk_freq;
	unsigned long long scrub_interval;
	/*
	 * Translate the desired scrub rate to a register value and
	 * program the corresponding register value.
	 */
	scrub_interval = (unsigned long long)freq_dclk_mhz *
		cache_line_size * 1000000;
	/* 64-bit divide; result left in scrub_interval */
	do_div(scrub_interval, new_bw);
	/* Requested rate out of the programmable range */
	if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK)
	dw_scrub = SCRUBINTERVAL_MASK & scrub_interval;
	/* Start the patrol scrub engine */
	pci_write_config_dword(pdev, MC_SCRUB_CONTROL,
			       STARTSCRUB | dw_scrub);
	/* Get current status of scrub rate and set bit to enable */
	pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
	dw_ssr &= ~SSR_MODE_MASK;
	dw_ssr |= SSR_MODE_ENABLE;
	/* Disable or enable scrubbing */
	pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr);
 * get_sdram_scrub_rate		This routine converts the current scrub rate
 *				value into bytes/sec bandwidth according to
 *				the SCRUBINTERVAL formula found in the datasheet.
/*
 * mci->get_sdram_scrub_rate hook: read SCRUBINTERVAL back from the MC and
 * convert it to bytes/sec (inverse of set_sdram_scrub_rate()).
 * NOTE(review): the scrubval == 0 (scrubbing disabled) early return is
 * elided from this chunk — do_div by zero must not be reachable.
 */
static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	const u32 cache_line_size = 64;
	const u32 freq_dclk_mhz = pvt->dclk_freq;
	unsigned long long scrub_rate;
	/* Get data from the MC register, function 2 */
	pdev = pvt->pci_mcr[2];
	/* Get current scrub control data */
	pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval);
	/* Mask highest 8-bits to 0 */
	scrubval &= SCRUBINTERVAL_MASK;
	/* Calculate scrub rate value into byte/sec bandwidth */
	scrub_rate = (unsigned long long)freq_dclk_mhz *
		1000000 * cache_line_size;
	do_div(scrub_rate, scrubval);
	return (int)scrub_rate;
/*
 * Unlock the non-core config registers (MC_CFG_UNLOCK) and install the
 * scrub-rate get/set hooks on the mci.
 */
static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)
	struct i7core_pvt *pvt = mci->pvt_info;
	/* Unlock writes to pci registers */
	pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
	pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
			       pci_lock | MC_CFG_UNLOCK);
	mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
	mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
/* Re-lock the non-core config registers (counterpart of enable_...) */
static void disable_sdram_scrub_setting(struct mem_ctl_info *mci)
	struct i7core_pvt *pvt = mci->pvt_info;
	/* Lock writes to pci registers */
	pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
	pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
			       pci_lock | MC_CFG_LOCK);
/* Create the generic EDAC PCI control; failure is non-fatal (warn only) */
static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
	pvt->i7core_pci = edac_pci_create_generic_ctl(
						&pvt->i7core_dev->pdev[0]->dev,
	if (unlikely(!pvt->i7core_pci))
		i7core_printk(KERN_WARNING,
			      "Unable to setup PCI error report via EDAC\n");
/* Release the EDAC PCI control created by i7core_pci_ctl_create() */
static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
	if (likely(pvt->i7core_pci))
		edac_pci_release_generic_ctl(pvt->i7core_pci);
	/* else branch (elided brace): control was never created */
	i7core_printk(KERN_ERR,
		      "Couldn't find mem_ctl_info for socket %d\n",
		      pvt->i7core_dev->socket);
	pvt->i7core_pci = NULL;
/*
 * Tear down one socket's MC instance: disable scrub control, unhook the
 * MCE notifier, release the EDAC PCI control, remove the sysfs nodes and
 * free the allocated name.
 */
static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
	struct mem_ctl_info *mci = i7core_dev->mci;
	struct i7core_pvt *pvt;
	if (unlikely(!mci || !mci->pvt_info)) {
		debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
			__func__, &i7core_dev->pdev[0]->dev);
		i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
	pvt = mci->pvt_info;
	debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
		__func__, mci, &i7core_dev->pdev[0]->dev);
	/* Disable scrubrate setting */
	if (pvt->enable_scrub)
		disable_sdram_scrub_setting(mci);
	mce_unregister_decode_chain(&i7_mce_dec);
	/* Disable EDAC polling */
	i7core_pci_ctl_release(pvt);
	/* Remove MC sysfs nodes */
	edac_mc_del_mc(mci->dev);
	debugf1("%s: free mci struct\n", mci->ctl_name);
	kfree(mci->ctl_name);
	i7core_dev->mci = NULL;
/*
 * Allocate and register one EDAC MC instance for a socket: size it from
 * the active channels/csrows, bind the PCI devices, pick the RDIMM or
 * uDIMM sysfs attribute set, read the DIMM configuration, optionally
 * enable scrub-rate control, register with the EDAC core and hook the
 * MCE decode chain.  Returns 0 on success, negative errno otherwise.
 * NOTE(review): error-path labels and several returns are elided here.
 */
static int i7core_register_mci(struct i7core_dev *i7core_dev)
	struct mem_ctl_info *mci;
	struct i7core_pvt *pvt;
	int rc, channels, csrows;
	/* Check the number of active and not disabled channels */
	rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
	if (unlikely(rc < 0))
	/* allocate a new MC control structure */
	mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
	debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
		__func__, mci, &i7core_dev->pdev[0]->dev);
	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));
	/* Associates i7core_dev and mci for future usage */
	pvt->i7core_dev = i7core_dev;
	i7core_dev->mci = mci;
	/*
	 * FIXME: how to handle RDDR3 at MCI level? It is possible to have
	 * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
	 */
	mci->mtype_cap = MEM_FLAG_DDR3;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i7core_edac.c";
	mci->mod_ver = I7CORE_REVISION;
	mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
				  i7core_dev->socket);
	mci->dev_name = pci_name(i7core_dev->pdev[0]);
	mci->ctl_page_to_phys = NULL;
	/* Store pci devices at mci for faster access */
	rc = mci_bind_devs(mci, i7core_dev);
	if (unlikely(rc < 0))
	/* RDIMM vs uDIMM decides which sysfs attribute set is exported */
	if (pvt->is_registered)
		mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
		mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;
	/* Get dimm basic config */
	get_dimm_config(mci);
	/* record ptr to the generic device */
	mci->dev = &i7core_dev->pdev[0]->dev;
	/* Set the function pointer to an actual operation function */
	mci->edac_check = i7core_check_error;
	/* Enable scrubrate setting */
	if (pvt->enable_scrub)
		enable_sdram_scrub_setting(mci);
	/* add this new MC control structure to EDAC's list of MCs */
	if (unlikely(edac_mc_add_mc(mci))) {
		debugf0("MC: " __FILE__
			": %s(): failed edac_mc_add_mc()\n", __func__);
		/* FIXME: perhaps some code should go here that disables error
		 * reporting if we just enabled it
	/* Default error mask is any memory */
	pvt->inject.channel = 0;
	pvt->inject.dimm = -1;
	pvt->inject.rank = -1;
	pvt->inject.bank = -1;
	pvt->inject.page = -1;
	pvt->inject.col = -1;
	/* allocating generic PCI control info */
	i7core_pci_ctl_create(pvt);
	/* DCLK for scrub rate setting */
	pvt->dclk_freq = get_dclk_freq();
	mce_register_decode_chain(&i7_mce_dec);
	/* Error path (label elided): undo the name allocation and binding */
	kfree(mci->ctl_name);
	i7core_dev->mci = NULL;
2350 * i7core_probe Probe for ONE instance of device to see if it is
2353 * 0 for FOUND a device
2354 * < 0 for error code
/*
 * PCI probe entry point.  All sockets are discovered and registered on
 * the first successful probe (guarded by 'probed' under the module mutex);
 * later probes are short-circuited.  Returns 0 when at least one MC was
 * registered, negative errno otherwise.
 */
static int __devinit i7core_probe(struct pci_dev *pdev,
				  const struct pci_device_id *id)
	struct i7core_dev *i7core_dev;
	/* get the pci devices we want to reserve for our use */
	mutex_lock(&i7core_edac_lock);
	/*
	 * All memory controllers are allocated at the first pass.
	 */
	if (unlikely(probed >= 1)) {
		mutex_unlock(&i7core_edac_lock);
	rc = i7core_get_all_devices();
	if (unlikely(rc < 0))
	list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
		rc = i7core_register_mci(i7core_dev);
		if (unlikely(rc < 0))
	/*
	 * Nehalem-EX uses a different memory controller. However, as the
	 * memory controller is not visible on some Nehalem/Nehalem-EP, we
	 * need to indirectly probe via a X58 PCI device. The same devices
	 * are found on (some) Nehalem-EX. So, on those machines, the
	 * probe routine needs to return -ENODEV, as the actual Memory
	 * Controller registers won't be detected.
	 */
	i7core_printk(KERN_INFO,
		      "Driver loaded, %d memory controller(s) found.\n",
	mutex_unlock(&i7core_edac_lock);
	/* Error path (label elided): unwind registered MCs and devices */
	list_for_each_entry(i7core_dev, &i7core_edac_list, list)
		i7core_unregister_mci(i7core_dev);
	i7core_put_all_devices();
	mutex_unlock(&i7core_edac_lock);
2417 * i7core_remove destructor for one instance of device
/*
 * PCI remove entry point: unregister every MC instance and release all
 * PCI references.  @pdev cannot be trusted to identify the instance (see
 * the comment below), so the whole global list is torn down.
 */
static void __devexit i7core_remove(struct pci_dev *pdev)
	struct i7core_dev *i7core_dev;
	debugf0(__FILE__ ": %s()\n", __func__);
	/*
	 * we have a trouble here: pdev value for removal will be wrong, since
	 * it will point to the X58 register used to detect that the machine
	 * is a Nehalem or upper design. However, due to the way several PCI
	 * devices are grouped together to provide MC functionality, we need
	 * to use a different method for releasing the devices
	 */
	mutex_lock(&i7core_edac_lock);
	if (unlikely(!probed)) {
		mutex_unlock(&i7core_edac_lock);
	list_for_each_entry(i7core_dev, &i7core_edac_list, list)
		i7core_unregister_mci(i7core_dev);
	/* Release PCI resources */
	i7core_put_all_devices();
	mutex_unlock(&i7core_edac_lock);
2452 MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);
2455 * i7core_driver pci_driver structure for this module
/* PCI driver descriptor registered from i7core_init() */
static struct pci_driver i7core_driver = {
	.name     = "i7core_edac",
	.probe    = i7core_probe,
	.remove   = __devexit_p(i7core_remove),
	.id_table = i7core_pci_tbl,
2466 * i7core_init Module entry function
2467 * Try to initialize this module for its devices
/*
 * Module init: apply the optional hidden-bus fixup, then register the PCI
 * driver.  Returns the pci_register_driver() result (error path elided).
 */
static int __init i7core_init(void)
	debugf2("MC: " __FILE__ ": %s()\n", __func__);
	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	i7core_xeon_pci_fixup(pci_dev_table);
	pci_rc = pci_register_driver(&i7core_driver);
	i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
2493 * i7core_exit() Module exit function
2494 * Unregister the driver
/* Module exit: unregister the PCI driver (probe's teardown runs via remove) */
static void __exit i7core_exit(void)
	debugf2("MC: " __FILE__ ": %s()\n", __func__);
	pci_unregister_driver(&i7core_driver);
2502 module_init(i7core_init);
2503 module_exit(i7core_exit);
2505 MODULE_LICENSE("GPL");
2506 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
2507 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
2508 MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
2511 module_param(edac_op_state, int, 0444);
2512 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");