1 /* Intel 7 core Memory Controller kernel module (Nehalem)
3 * This file may be distributed under the terms of the
4 * GNU General Public License version 2 only.
6 * Copyright (c) 2009 by:
7 * Mauro Carvalho Chehab <mchehab@redhat.com>
9 * Red Hat Inc. http://www.redhat.com
11 * Forked and adapted from the i5400_edac driver
13 * Based on the following public Intel datasheets:
14 * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
15 * Datasheet, Volume 2:
16 * http://download.intel.com/design/processor/datashts/320835.pdf
17 * Intel Xeon Processor 5500 Series Datasheet Volume 2
18 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
20 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
23 #include <linux/module.h>
24 #include <linux/init.h>
25 #include <linux/pci.h>
26 #include <linux/pci_ids.h>
27 #include <linux/slab.h>
28 #include <linux/edac.h>
29 #include <linux/mmzone.h>
30 #include <linux/edac_mce.h>
31 #include <linux/spinlock.h>
33 #include "edac_core.h"
35 /* To use the new pci_[read/write]_config_qword instead of two dword */
39 * Alter this version for the module when modifications are made
41 #define I7CORE_REVISION " Ver: 1.0.0 " __DATE__
42 #define EDAC_MOD_STR "i7core_edac"
44 /* HACK: temporary, just to enable all logs, for now */
46 #define debugf0(fmt, arg...) edac_printk(KERN_INFO, "i7core", fmt, ##arg)
51 #define i7core_printk(level, fmt, arg...) \
52 edac_printk(level, "i7core", fmt, ##arg)
54 #define i7core_mc_printk(mci, level, fmt, arg...) \
55 edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
58 * i7core Memory Controller Registers
61 /* OFFSETS for Device 0 Function 0 */
63 #define MC_CFG_CONTROL 0x90
65 /* OFFSETS for Device 3 Function 0 */
67 #define MC_CONTROL 0x48
68 #define MC_STATUS 0x4c
69 #define MC_MAX_DOD 0x64
72 * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet:
73 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
76 #define MC_TEST_ERR_RCV1 0x60
77 #define DIMM2_COR_ERR(r) ((r) & 0x7fff)
79 #define MC_TEST_ERR_RCV0 0x64
80 #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff)
81 #define DIMM0_COR_ERR(r) ((r) & 0x7fff)
83 /* OFFSETS for Devices 4,5 and 6 Function 0 */
85 #define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
86 #define THREE_DIMMS_PRESENT (1 << 24)
87 #define SINGLE_QUAD_RANK_PRESENT (1 << 23)
88 #define QUAD_RANK_PRESENT (1 << 22)
89 #define REGISTERED_DIMM (1 << 15)
91 #define MC_CHANNEL_MAPPER 0x60
92 #define RDLCH(r, ch) ((((r) >> (3 + (ch * 6))) & 0x07) - 1)
93 #define WRLCH(r, ch) ((((r) >> (ch * 6)) & 0x07) - 1)
95 #define MC_CHANNEL_RANK_PRESENT 0x7c
96 #define RANK_PRESENT_MASK 0xffff
98 #define MC_CHANNEL_ADDR_MATCH 0xf0
99 #define MC_CHANNEL_ERROR_MASK 0xf8
100 #define MC_CHANNEL_ERROR_INJECT 0xfc
101 #define INJECT_ADDR_PARITY 0x10
102 #define INJECT_ECC 0x08
103 #define MASK_CACHELINE 0x06
104 #define MASK_FULL_CACHELINE 0x06
105 #define MASK_MSB32_CACHELINE 0x04
106 #define MASK_LSB32_CACHELINE 0x02
107 #define NO_MASK_CACHELINE 0x00
108 #define REPEAT_EN 0x01
110 /* OFFSETS for Devices 4,5 and 6 Function 1 */
111 #define MC_DOD_CH_DIMM0 0x48
112 #define MC_DOD_CH_DIMM1 0x4c
113 #define MC_DOD_CH_DIMM2 0x50
114 #define RANKOFFSET_MASK ((1 << 12) | (1 << 11) | (1 << 10))
115 #define RANKOFFSET(x) ((x & RANKOFFSET_MASK) >> 10)
116 #define DIMM_PRESENT_MASK (1 << 9)
117 #define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9)
118 #define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7))
119 #define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7)
120 #define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5))
121 #define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5)
122 #define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2))
123 #define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2)
124 #define MC_DOD_NUMCOL_MASK 3
125 #define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK)
127 #define MC_RANK_PRESENT 0x7c
129 #define MC_SAG_CH_0 0x80
130 #define MC_SAG_CH_1 0x84
131 #define MC_SAG_CH_2 0x88
132 #define MC_SAG_CH_3 0x8c
133 #define MC_SAG_CH_4 0x90
134 #define MC_SAG_CH_5 0x94
135 #define MC_SAG_CH_6 0x98
136 #define MC_SAG_CH_7 0x9c
138 #define MC_RIR_LIMIT_CH_0 0x40
139 #define MC_RIR_LIMIT_CH_1 0x44
140 #define MC_RIR_LIMIT_CH_2 0x48
141 #define MC_RIR_LIMIT_CH_3 0x4C
142 #define MC_RIR_LIMIT_CH_4 0x50
143 #define MC_RIR_LIMIT_CH_5 0x54
144 #define MC_RIR_LIMIT_CH_6 0x58
145 #define MC_RIR_LIMIT_CH_7 0x5C
146 #define MC_RIR_LIMIT_MASK ((1 << 10) - 1)
148 #define MC_RIR_WAY_CH 0x80
149 #define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7)
150 #define MC_RIR_WAY_RANK_MASK 0x7
157 #define MAX_DIMMS 3 /* Max DIMMS per channel */
158 #define NUM_SOCKETS 2 /* Max number of MC sockets */
159 #define MAX_MCR_FUNC 4
160 #define MAX_CHAN_FUNC 3
170 struct i7core_inject {
178 /* Error address mask */
179 int channel, dimm, rank, bank, page, col;
182 struct i7core_channel {
187 struct pci_id_descr {
191 struct pci_dev *pdev[NUM_SOCKETS];
195 struct pci_dev *pci_noncore[NUM_SOCKETS];
196 struct pci_dev *pci_mcr[NUM_SOCKETS][MAX_MCR_FUNC + 1];
197 struct pci_dev *pci_ch[NUM_SOCKETS][NUM_CHANS][MAX_CHAN_FUNC + 1];
199 struct i7core_info info;
200 struct i7core_inject inject;
201 struct i7core_channel channel[NUM_SOCKETS][NUM_CHANS];
203 int sockets; /* Number of sockets */
204 int channels; /* Number of active channels */
206 int ce_count_available[NUM_SOCKETS];
207 /* ECC corrected errors counts per dimm */
208 unsigned long ce_count[NUM_SOCKETS][MAX_DIMMS];
209 int last_ce_count[NUM_SOCKETS][MAX_DIMMS];
212 struct edac_mce edac_mce;
213 struct mce mce_entry[MCE_LOG_LEN];
218 /* Device name and register DID (Device ID) */
219 struct i7core_dev_info {
220 const char *ctl_name; /* name for this device */
221 u16 fsb_mapping_errors; /* DID for the branchmap,control */
224 #define PCI_DESCR(device, function, device_id) \
226 .func = (function), \
227 .dev_id = (device_id)
229 struct pci_id_descr pci_devs[] = {
230 /* Generic Non-core registers */
231 { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NOCORE) },
233 /* Memory controller */
234 { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
235 { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
236 { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS) }, /* if RDIMM is supported */
237 { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
240 { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
241 { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
242 { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
243 { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) },
246 { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
247 { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
248 { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
249 { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) },
252 { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
253 { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
254 { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
255 { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },
257 #define N_DEVS ARRAY_SIZE(pci_devs)
260 * pci_device_id table for which devices we are looking for
261 * This should match the first device at pci_devs table
263 static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
264 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
265 {0,} /* 0 terminated list. */
269 /* Table of devices attributes supported by this driver */
270 static const struct i7core_dev_info i7core_devs[] = {
272 .ctl_name = "i7 Core",
273 .fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7_MCR,
277 static struct edac_pci_ctl_info *i7core_pci;
279 /****************************************************************************
280 Ancillary status routines
281 ****************************************************************************/
283 /* MC_CONTROL bits */
284 #define CH_ACTIVE(pvt, ch) ((pvt)->info.mc_control & (1 << (8 + ch)))
285 #define ECCx8(pvt) ((pvt)->info.mc_control & (1 << 1))
288 #define ECC_ENABLED(pvt) ((pvt)->info.mc_status & (1 << 3))
289 #define CH_DISABLED(pvt, ch) ((pvt)->info.mc_status & (1 << ch))
291 /* MC_MAX_DOD read functions */
292 static inline int numdimms(u32 dimms)
294 return (dimms & 0x3) + 1;
297 static inline int numrank(u32 rank)
299 static int ranks[4] = { 1, 2, 4, -EINVAL };
301 return ranks[rank & 0x3];
304 static inline int numbank(u32 bank)
306 static int banks[4] = { 4, 8, 16, -EINVAL };
308 return banks[bank & 0x3];
311 static inline int numrow(u32 row)
313 static int rows[8] = {
314 1 << 12, 1 << 13, 1 << 14, 1 << 15,
315 1 << 16, -EINVAL, -EINVAL, -EINVAL,
318 return rows[row & 0x7];
321 static inline int numcol(u32 col)
323 static int cols[8] = {
324 1 << 10, 1 << 11, 1 << 12, -EINVAL,
326 return cols[col & 0x3];
330 /****************************************************************************
331 Memory check routines
332 ****************************************************************************/
333 static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
338 for (i = 0; i < N_DEVS; i++) {
339 if (!pci_devs[i].pdev[socket])
342 if (PCI_SLOT(pci_devs[i].pdev[socket]->devfn) == slot &&
343 PCI_FUNC(pci_devs[i].pdev[socket]->devfn) == func) {
344 return pci_devs[i].pdev[socket];
351 static int i7core_get_active_channels(u8 socket, unsigned *channels,
354 struct pci_dev *pdev = NULL;
361 pdev = get_pdev_slot_func(socket, 3, 0);
363 i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
368 /* Device 3 function 0 reads */
369 pci_read_config_dword(pdev, MC_STATUS, &status);
370 pci_read_config_dword(pdev, MC_CONTROL, &control);
372 for (i = 0; i < NUM_CHANS; i++) {
374 /* Check if the channel is active */
375 if (!(control & (1 << (8 + i))))
378 /* Check if the channel is disabled */
379 if (status & (1 << i))
382 pdev = get_pdev_slot_func(socket, i + 4, 1);
384 i7core_printk(KERN_ERR, "Couldn't find socket %d "
389 /* Devices 4-6 function 1 */
390 pci_read_config_dword(pdev,
391 MC_DOD_CH_DIMM0, &dimm_dod[0]);
392 pci_read_config_dword(pdev,
393 MC_DOD_CH_DIMM1, &dimm_dod[1]);
394 pci_read_config_dword(pdev,
395 MC_DOD_CH_DIMM2, &dimm_dod[2]);
399 for (j = 0; j < 3; j++) {
400 if (!DIMM_PRESENT(dimm_dod[j]))
406 debugf0("Number of active channels on socked %d: %d\n",
412 static int get_dimm_config(struct mem_ctl_info *mci, u8 socket)
414 struct i7core_pvt *pvt = mci->pvt_info;
415 struct csrow_info *csr;
416 struct pci_dev *pdev;
418 unsigned long last_page = 0;
422 /* Get data from the MC register, function 0 */
423 pdev = pvt->pci_mcr[socket][0];
427 /* Device 3 function 0 reads */
428 pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
429 pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
430 pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
431 pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
433 debugf0("MC control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
434 pvt->info.mc_control, pvt->info.mc_status,
435 pvt->info.max_dod, pvt->info.ch_map);
437 if (ECC_ENABLED(pvt)) {
438 debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
440 mode = EDAC_S8ECD8ED;
442 mode = EDAC_S4ECD4ED;
444 debugf0("ECC disabled\n");
448 /* FIXME: need to handle the error codes */
449 debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked\n",
450 numdimms(pvt->info.max_dod),
451 numrank(pvt->info.max_dod >> 2),
452 numbank(pvt->info.max_dod >> 4));
453 debugf0("DOD Max rows x colums = 0x%x x 0x%x\n",
454 numrow(pvt->info.max_dod >> 6),
455 numcol(pvt->info.max_dod >> 9));
457 debugf0("Memory channel configuration:\n");
459 for (i = 0; i < NUM_CHANS; i++) {
460 u32 data, dimm_dod[3], value[8];
462 if (!CH_ACTIVE(pvt, i)) {
463 debugf0("Channel %i is not active\n", i);
466 if (CH_DISABLED(pvt, i)) {
467 debugf0("Channel %i is disabled\n", i);
471 /* Devices 4-6 function 0 */
472 pci_read_config_dword(pvt->pci_ch[socket][i][0],
473 MC_CHANNEL_DIMM_INIT_PARAMS, &data);
475 pvt->channel[socket][i].ranks = (data & QUAD_RANK_PRESENT) ?
478 if (data & REGISTERED_DIMM)
483 if (data & THREE_DIMMS_PRESENT)
484 pvt->channel[i].dimms = 3;
485 else if (data & SINGLE_QUAD_RANK_PRESENT)
486 pvt->channel[i].dimms = 1;
488 pvt->channel[i].dimms = 2;
491 /* Devices 4-6 function 1 */
492 pci_read_config_dword(pvt->pci_ch[socket][i][1],
493 MC_DOD_CH_DIMM0, &dimm_dod[0]);
494 pci_read_config_dword(pvt->pci_ch[socket][i][1],
495 MC_DOD_CH_DIMM1, &dimm_dod[1]);
496 pci_read_config_dword(pvt->pci_ch[socket][i][1],
497 MC_DOD_CH_DIMM2, &dimm_dod[2]);
499 debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
500 "%d ranks, %cDIMMs\n",
502 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
504 pvt->channel[socket][i].ranks,
505 (data & REGISTERED_DIMM) ? 'R' : 'U');
507 for (j = 0; j < 3; j++) {
508 u32 banks, ranks, rows, cols;
511 if (!DIMM_PRESENT(dimm_dod[j]))
514 banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
515 ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
516 rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
517 cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
519 /* DDR3 has 8 I/O banks */
520 size = (rows * cols * banks * ranks) >> (20 - 3);
522 pvt->channel[socket][i].dimms++;
524 debugf0("\tdimm %d (0x%08x) %d Mb offset: %x, "
526 "numrank: %d, numrow: %#x, numcol: %#x\n",
527 j, dimm_dod[j], size,
528 RANKOFFSET(dimm_dod[j]),
529 banks, ranks, rows, cols);
532 npages = size >> (PAGE_SHIFT - 20);
534 npages = size << (20 - PAGE_SHIFT);
537 csr = &mci->csrows[csrow];
538 csr->first_page = last_page + 1;
540 csr->last_page = last_page;
541 csr->nr_pages = npages;
545 csr->csrow_idx = csrow;
546 csr->nr_channels = 1;
548 csr->channels[0].chan_idx = i;
549 csr->channels[0].ce_count = 0;
559 csr->dtype = DEV_X16;
562 csr->dtype = DEV_UNKNOWN;
565 csr->edac_mode = mode;
571 pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
572 pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
573 pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
574 pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
575 pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
576 pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
577 pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
578 pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
579 debugf0("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
580 for (j = 0; j < 8; j++)
581 debugf0("\t\t%#x\t%#x\t%#x\n",
582 (value[j] >> 27) & 0x1,
583 (value[j] >> 24) & 0x7,
584 (value[j] && ((1 << 24) - 1)));
590 /****************************************************************************
591 Error insertion routines
592 ****************************************************************************/
594 /* The i7core has independent error injection features per channel.
595 However, to have a simpler code, we don't allow enabling error injection
596 on more than one channel.
597 Also, since a change at an inject parameter will be applied only at enable,
598 we're disabling error injection on all write calls to the sysfs nodes that
599 controls the error code injection.
601 static int disable_inject(struct mem_ctl_info *mci)
603 struct i7core_pvt *pvt = mci->pvt_info;
605 pvt->inject.enable = 0;
607 if (!pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0])
610 pci_write_config_dword(pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0],
611 MC_CHANNEL_ERROR_MASK, 0);
617 * i7core inject inject.socket
619 * accept and store error injection inject.socket value
621 static ssize_t i7core_inject_socket_store(struct mem_ctl_info *mci,
622 const char *data, size_t count)
624 struct i7core_pvt *pvt = mci->pvt_info;
628 rc = strict_strtoul(data, 10, &value);
629 if ((rc < 0) || (value > pvt->sockets))
632 pvt->inject.section = (u32) value;
636 static ssize_t i7core_inject_socket_show(struct mem_ctl_info *mci,
639 struct i7core_pvt *pvt = mci->pvt_info;
640 return sprintf(data, "%d\n", pvt->inject.socket);
644 * i7core inject inject.section
646 * accept and store error injection inject.section value
647 * bit 0 - refers to the lower 32-byte half cacheline
648 * bit 1 - refers to the upper 32-byte half cacheline
650 static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
651 const char *data, size_t count)
653 struct i7core_pvt *pvt = mci->pvt_info;
657 if (pvt->inject.enable)
660 rc = strict_strtoul(data, 10, &value);
661 if ((rc < 0) || (value > 3))
664 pvt->inject.section = (u32) value;
668 static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
671 struct i7core_pvt *pvt = mci->pvt_info;
672 return sprintf(data, "0x%08x\n", pvt->inject.section);
678 * accept and store error injection inject.section value
679 * bit 0 - repeat enable - Enable error repetition
680 * bit 1 - inject ECC error
681 * bit 2 - inject parity error
683 static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
684 const char *data, size_t count)
686 struct i7core_pvt *pvt = mci->pvt_info;
690 if (pvt->inject.enable)
693 rc = strict_strtoul(data, 10, &value);
694 if ((rc < 0) || (value > 7))
697 pvt->inject.type = (u32) value;
701 static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
704 struct i7core_pvt *pvt = mci->pvt_info;
705 return sprintf(data, "0x%08x\n", pvt->inject.type);
709 * i7core_inject_inject.eccmask_store
711 * The type of error (UE/CE) will depend on the inject.eccmask value:
712 * Any bits set to a 1 will flip the corresponding ECC bit
713 * Correctable errors can be injected by flipping 1 bit or the bits within
714 * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
715 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
716 * uncorrectable error to be injected.
718 static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
719 const char *data, size_t count)
721 struct i7core_pvt *pvt = mci->pvt_info;
725 if (pvt->inject.enable)
728 rc = strict_strtoul(data, 10, &value);
732 pvt->inject.eccmask = (u32) value;
736 static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
739 struct i7core_pvt *pvt = mci->pvt_info;
740 return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
746 * The type of error (UE/CE) will depend on the inject.eccmask value:
747 * Any bits set to a 1 will flip the corresponding ECC bit
748 * Correctable errors can be injected by flipping 1 bit or the bits within
749 * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
750 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
751 * uncorrectable error to be injected.
753 static ssize_t i7core_inject_addrmatch_store(struct mem_ctl_info *mci,
754 const char *data, size_t count)
756 struct i7core_pvt *pvt = mci->pvt_info;
761 if (pvt->inject.enable)
765 cmd = strsep((char **) &data, ":");
768 val = strsep((char **) &data, " \n\t");
772 if (!strcasecmp(val, "any"))
775 rc = strict_strtol(val, 10, &value);
776 if ((rc < 0) || (value < 0))
780 if (!strcasecmp(cmd, "channel")) {
782 pvt->inject.channel = value;
785 } else if (!strcasecmp(cmd, "dimm")) {
787 pvt->inject.dimm = value;
790 } else if (!strcasecmp(cmd, "rank")) {
792 pvt->inject.rank = value;
795 } else if (!strcasecmp(cmd, "bank")) {
797 pvt->inject.bank = value;
800 } else if (!strcasecmp(cmd, "page")) {
802 pvt->inject.page = value;
805 } else if (!strcasecmp(cmd, "col") ||
806 !strcasecmp(cmd, "column")) {
808 pvt->inject.col = value;
817 static ssize_t i7core_inject_addrmatch_show(struct mem_ctl_info *mci,
820 struct i7core_pvt *pvt = mci->pvt_info;
821 char channel[4], dimm[4], bank[4], rank[4], page[7], col[7];
823 if (pvt->inject.channel < 0)
824 sprintf(channel, "any");
826 sprintf(channel, "%d", pvt->inject.channel);
827 if (pvt->inject.dimm < 0)
828 sprintf(dimm, "any");
830 sprintf(dimm, "%d", pvt->inject.dimm);
831 if (pvt->inject.bank < 0)
832 sprintf(bank, "any");
834 sprintf(bank, "%d", pvt->inject.bank);
835 if (pvt->inject.rank < 0)
836 sprintf(rank, "any");
838 sprintf(rank, "%d", pvt->inject.rank);
839 if (pvt->inject.page < 0)
840 sprintf(page, "any");
842 sprintf(page, "0x%04x", pvt->inject.page);
843 if (pvt->inject.col < 0)
846 sprintf(col, "0x%04x", pvt->inject.col);
848 return sprintf(data, "channel: %s\ndimm: %s\nbank: %s\n"
849 "rank: %s\npage: %s\ncolumn: %s\n",
850 channel, dimm, bank, rank, page, col);
854 * This routine prepares the Memory Controller for error injection.
855 * The error will be injected when some process tries to write to the
856 * memory that matches the given criteria.
857 * The criteria can be set in terms of a mask where dimm, rank, bank, page
858 * and col can be specified.
859 * A -1 value for any of the mask items will make the MCU to ignore
860 * that matching criteria for error injection.
862 * It should be noticed that the error will only happen after a write operation
863 * on a memory that matches the condition. if REPEAT_EN is not enabled at
864 * inject mask, then it will produce just one error. Otherwise, it will repeat
865 * until the injectmask would be cleaned.
867 * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD
868 * is reliable enough to check if the MC is using the
869 * three channels. However, this is not clear at the datasheet.
871 static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
872 const char *data, size_t count)
874 struct i7core_pvt *pvt = mci->pvt_info;
880 if (!pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0])
883 rc = strict_strtoul(data, 10, &enable);
888 pvt->inject.enable = 1;
894 /* Sets pvt->inject.dimm mask */
895 if (pvt->inject.dimm < 0)
898 if (pvt->channel[pvt->inject.socket][pvt->inject.channel].dimms > 2)
899 mask |= (pvt->inject.dimm & 0x3L) << 35;
901 mask |= (pvt->inject.dimm & 0x1L) << 36;
904 /* Sets pvt->inject.rank mask */
905 if (pvt->inject.rank < 0)
908 if (pvt->channel[pvt->inject.socket][pvt->inject.channel].dimms > 2)
909 mask |= (pvt->inject.rank & 0x1L) << 34;
911 mask |= (pvt->inject.rank & 0x3L) << 34;
914 /* Sets pvt->inject.bank mask */
915 if (pvt->inject.bank < 0)
918 mask |= (pvt->inject.bank & 0x15L) << 30;
920 /* Sets pvt->inject.page mask */
921 if (pvt->inject.page < 0)
924 mask |= (pvt->inject.page & 0xffffL) << 14;
926 /* Sets pvt->inject.column mask */
927 if (pvt->inject.col < 0)
930 mask |= (pvt->inject.col & 0x3fffL);
932 /* Unlock writes to registers */
933 pci_write_config_dword(pvt->pci_noncore[pvt->inject.socket],
934 MC_CFG_CONTROL, 0x2);
937 /* Zeroes error count registers */
938 pci_write_config_dword(pvt->pci_mcr[pvt->inject.socket][4],
939 MC_TEST_ERR_RCV1, 0);
940 pci_write_config_dword(pvt->pci_mcr[pvt->inject.socket][4],
941 MC_TEST_ERR_RCV0, 0);
942 pvt->ce_count_available[pvt->inject.socket] = 0;
946 pci_write_config_qword(pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0],
947 MC_CHANNEL_ADDR_MATCH, mask);
949 pci_write_config_dword(pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0],
950 MC_CHANNEL_ADDR_MATCH, mask);
951 pci_write_config_dword(pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0],
952 MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);
958 pci_read_config_qword(pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0],
959 MC_CHANNEL_ADDR_MATCH, &rdmask);
960 debugf0("Inject addr match write 0x%016llx, read: 0x%016llx\n",
963 u32 rdmask1, rdmask2;
965 pci_read_config_dword(pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0],
966 MC_CHANNEL_ADDR_MATCH, &rdmask1);
967 pci_read_config_dword(pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0],
968 MC_CHANNEL_ADDR_MATCH + 4, &rdmask2);
970 debugf0("Inject addr match write 0x%016llx, read: 0x%08x 0x%08x\n",
971 mask, rdmask1, rdmask2);
975 pci_write_config_dword(pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0],
976 MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);
980 * bits 1-2: MASK_HALF_CACHELINE
982 * bit 4: INJECT_ADDR_PARITY
985 injectmask = (pvt->inject.type & 1) |
986 (pvt->inject.section & 0x3) << 1 |
987 (pvt->inject.type & 0x6) << (3 - 1);
989 pci_write_config_dword(pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0],
990 MC_CHANNEL_ERROR_MASK, injectmask);
993 /* lock writes to registers */
994 pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, 0);
996 debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
998 mask, pvt->inject.eccmask, injectmask);
1004 static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
1007 struct i7core_pvt *pvt = mci->pvt_info;
1010 pci_read_config_dword(pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0],
1011 MC_CHANNEL_ERROR_MASK, &injectmask);
1013 debugf0("Inject error read: 0x%018x\n", injectmask);
1015 if (injectmask & 0x0c)
1016 pvt->inject.enable = 1;
1018 return sprintf(data, "%d\n", pvt->inject.enable);
1021 static ssize_t i7core_ce_regs_show(struct mem_ctl_info *mci, char *data)
1023 unsigned i, count, total = 0;
1024 struct i7core_pvt *pvt = mci->pvt_info;
1026 for (i = 0; i < pvt->sockets; i++) {
1027 if (!pvt->ce_count_available[i])
1028 count = sprintf(data, "socket 0 data unavailable\n");
1030 count = sprintf(data, "socket %d, dimm0: %lu\n"
1031 "dimm1: %lu\ndimm2: %lu\n",
1033 pvt->ce_count[i][0],
1034 pvt->ce_count[i][1],
1035 pvt->ce_count[i][2]);
1046 static struct mcidev_sysfs_attribute i7core_inj_attrs[] = {
1049 .name = "inject_socket",
1050 .mode = (S_IRUGO | S_IWUSR)
1052 .show = i7core_inject_socket_show,
1053 .store = i7core_inject_socket_store,
1056 .name = "inject_section",
1057 .mode = (S_IRUGO | S_IWUSR)
1059 .show = i7core_inject_section_show,
1060 .store = i7core_inject_section_store,
1063 .name = "inject_type",
1064 .mode = (S_IRUGO | S_IWUSR)
1066 .show = i7core_inject_type_show,
1067 .store = i7core_inject_type_store,
1070 .name = "inject_eccmask",
1071 .mode = (S_IRUGO | S_IWUSR)
1073 .show = i7core_inject_eccmask_show,
1074 .store = i7core_inject_eccmask_store,
1077 .name = "inject_addrmatch",
1078 .mode = (S_IRUGO | S_IWUSR)
1080 .show = i7core_inject_addrmatch_show,
1081 .store = i7core_inject_addrmatch_store,
1084 .name = "inject_enable",
1085 .mode = (S_IRUGO | S_IWUSR)
1087 .show = i7core_inject_enable_show,
1088 .store = i7core_inject_enable_store,
1091 .name = "corrected_error_counts",
1092 .mode = (S_IRUGO | S_IWUSR)
1094 .show = i7core_ce_regs_show,
1099 /****************************************************************************
1100 Device initialization routines: put/get, init/exit
1101 ****************************************************************************/
1104 * i7core_put_devices 'put' all the devices that we have
1105 * reserved via 'get'
1107 static void i7core_put_devices(void)
1111 for (i = 0; i < NUM_SOCKETS; i++)
1112 for (j = 0; j < N_DEVS; j++)
1113 pci_dev_put(pci_devs[j].pdev[i]);
1117 * i7core_get_devices Find and perform 'get' operation on the MCH's
1118 * device/functions we want to reference for this driver
1120 * Need to 'get' device 16 func 1 and func 2
1122 static int i7core_get_devices(void)
1125 struct pci_dev *pdev = NULL;
1129 for (i = 0; i < N_DEVS; i++) {
1130 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1131 pci_devs[i].dev_id, NULL);
1134 pcibios_scan_specific_bus(254);
1135 pcibios_scan_specific_bus(255);
1137 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1138 pci_devs[i].dev_id, NULL);
1142 bus = pdev->bus->number;
1149 if (socket >= NUM_SOCKETS) {
1150 i7core_printk(KERN_ERR,
1151 "Found unexpected socket for "
1152 "dev %02x:%02x.%d PCI ID %04x:%04x\n",
1153 bus, pci_devs[i].dev, pci_devs[i].func,
1154 PCI_VENDOR_ID_INTEL, pci_devs[i].dev_id);
1160 pci_devs[i].pdev[socket] = pdev;
1162 i7core_printk(KERN_ERR,
1163 "Device not found: "
1164 "dev %02x:%02x.%d PCI ID %04x:%04x\n",
1165 bus, pci_devs[i].dev, pci_devs[i].func,
1166 PCI_VENDOR_ID_INTEL, pci_devs[i].dev_id);
1168 /* Dev 3 function 2 only exists on chips with RDIMMs */
1169 if ((pci_devs[i].dev == 3) && (pci_devs[i].func == 2))
1172 /* End of list, leave */
1178 if (unlikely(PCI_SLOT(pdev->devfn) != pci_devs[i].dev ||
1179 PCI_FUNC(pdev->devfn) != pci_devs[i].func)) {
1180 i7core_printk(KERN_ERR,
1181 "Device PCI ID %04x:%04x "
1182 "has fn %d.%d instead of fn %d.%d\n",
1183 PCI_VENDOR_ID_INTEL, pci_devs[i].dev_id,
1184 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1185 pci_devs[i].dev, pci_devs[i].func);
1190 /* Be sure that the device is enabled */
1191 rc = pci_enable_device(pdev);
1192 if (unlikely(rc < 0)) {
1193 i7core_printk(KERN_ERR,
1194 "Couldn't enable PCI ID %04x:%04x "
1196 PCI_VENDOR_ID_INTEL, pci_devs[i].dev_id,
1197 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1201 i7core_printk(KERN_INFO,
1202 "Registered socket %d "
1203 "dev %02x:%02x.%d PCI ID %04x:%04x\n",
1204 socket, bus, pci_devs[i].dev, pci_devs[i].func,
1205 PCI_VENDOR_ID_INTEL, pci_devs[i].dev_id);
1211 i7core_put_devices();
1215 static int mci_bind_devs(struct mem_ctl_info *mci)
1217 struct i7core_pvt *pvt = mci->pvt_info;
1218 struct pci_dev *pdev;
1219 int i, j, func, slot;
1221 for (i = 0; i < pvt->sockets; i++) {
1222 for (j = 0; j < N_DEVS; j++) {
1223 pdev = pci_devs[j].pdev[i];
1227 func = PCI_FUNC(pdev->devfn);
1228 slot = PCI_SLOT(pdev->devfn);
1230 if (unlikely(func > MAX_MCR_FUNC))
1232 pvt->pci_mcr[i][func] = pdev;
1233 } else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
1234 if (unlikely(func > MAX_CHAN_FUNC))
1236 pvt->pci_ch[i][slot - 4][func] = pdev;
1237 } else if (!slot && !func)
1238 pvt->pci_noncore[i] = pdev;
1242 debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
1243 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1251 i7core_printk(KERN_ERR, "Device %d, function %d "
1252 "is out of the expected range\n",
1257 /****************************************************************************
1258 Error check routines
1259 ****************************************************************************/
1261 /* This function is based on the device 3 function 4 registers as described on:
1262 * Intel Xeon Processor 5500 Series Datasheet Volume 2
1263 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
1264 * also available at:
1265 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
1267 static void check_mc_test_err(struct mem_ctl_info *mci, u8 socket)
1269 struct i7core_pvt *pvt = mci->pvt_info;
1271 int new0, new1, new2;
1273 if (!pvt->pci_mcr[socket][4]) {
1274 debugf0("%s MCR registers not found\n",__func__);
1278 /* Corrected error reads */
1279 pci_read_config_dword(pvt->pci_mcr[socket][4], MC_TEST_ERR_RCV1, &rcv1);
1280 pci_read_config_dword(pvt->pci_mcr[socket][4], MC_TEST_ERR_RCV0, &rcv0);
1282 /* Store the new values */
1283 new2 = DIMM2_COR_ERR(rcv1);
1284 new1 = DIMM1_COR_ERR(rcv0);
1285 new0 = DIMM0_COR_ERR(rcv0);
1288 debugf2("%s CE rcv1=0x%08x rcv0=0x%08x, %d %d %d\n",
1289 (pvt->ce_count_available ? "UPDATE" : "READ"),
1290 rcv1, rcv0, new0, new1, new2);
1293 /* Updates CE counters if it is not the first time here */
1294 if (pvt->ce_count_available[socket]) {
1295 /* Updates CE counters */
1296 int add0, add1, add2;
1298 add2 = new2 - pvt->last_ce_count[socket][2];
1299 add1 = new1 - pvt->last_ce_count[socket][1];
1300 add0 = new0 - pvt->last_ce_count[socket][0];
1304 pvt->ce_count[socket][2] += add2;
1308 pvt->ce_count[socket][1] += add1;
1312 pvt->ce_count[socket][0] += add0;
1314 pvt->ce_count_available[socket] = 1;
1316 /* Store the new values */
1317 pvt->last_ce_count[socket][2] = new2;
1318 pvt->last_ce_count[socket][1] = new1;
1319 pvt->last_ce_count[socket][0] = new0;
1322 static void i7core_mce_output_error(struct mem_ctl_info *mci,
1325 debugf0("CPU %d: Machine Check Exception: %16Lx"
1326 "Bank %d: %016Lx\n",
1327 m->cpu, m->mcgstatus, m->bank, m->status);
1329 debugf0("RIP%s %02x:<%016Lx>\n",
1330 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
1333 printk(KERN_EMERG "TSC %llx ", m->tsc);
1335 printk("ADDR %llx ", m->addr);
1337 printk("MISC %llx ", m->misc);
1340 snprintf(msg, sizeof(msg),
1341 "%s (Branch=%d DRAM-Bank=%d Buffer ID = %d RDWR=%s "
1342 "RAS=%d CAS=%d %s Err=0x%lx (%s))",
1343 type, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas,
1344 type, allErrors, error_name[errnum]);
1346 /* Call the helper to output message */
1347 edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
/*
 * i7core_check_error	Retrieve and process errors reported by the
 *			hardware. Called by the Core module.
 *
 * NOTE(review): the opening brace, the declarations of 'i' and 'count',
 * the kfree() of the temporary buffer and the closing braces are elided
 * in this excerpt.
 */
static void i7core_check_error(struct mem_ctl_info *mci)
	struct i7core_pvt *pvt = mci->pvt_info;
	struct mce *m = NULL;
	unsigned long flags;

	debugf0(__FILE__ ": %s()\n", __func__);

	/*
	 * Copy all mce errors into a temporary buffer under the lock, so
	 * the producer (i7core_mce_check_error) is blocked only briefly;
	 * decoding happens outside the critical section.
	 */
	spin_lock_irqsave(&pvt->mce_lock, flags);
	if (pvt->mce_count) {
		/*
		 * NOTE(review): no kmalloc() NULL check is visible before the
		 * memcpy() below dereferences 'm' -- it may be on an elided
		 * line; confirm against the full source.
		 */
		m = kmalloc(sizeof(*m) * pvt->mce_count, GFP_ATOMIC);
		count = pvt->mce_count;
		memcpy(m, &pvt->mce_entry, sizeof(*m) * count);
	spin_unlock_irqrestore(&pvt->mce_lock, flags);

	/* process the queued mcelog errors */
	for (i = 0; i < count; i++)
		i7core_mce_output_error(mci, &m[i]);

	/* check memory count errors, one pass per socket */
	for (i = 0; i < pvt->sockets; i++)
		check_mc_test_err(mci, i);
/*
 * i7core_mce_check_error	Replicates mcelog routine to get errors
 *				This routine simply queues mcelog errors, and
 *				returns. The error itself should be handled
 *				later by i7core_check_error.
 *
 * NOTE(review): the opening brace, the mce_count increment inside the
 * if-block, and the closing braces are elided in this excerpt.
 */
static int i7core_mce_check_error(void *priv, struct mce *mce)
	struct i7core_pvt *pvt = priv;
	unsigned long flags;

	debugf0(__FILE__ ": %s()\n", __func__);

	/* Queue the record; entries beyond MCE_LOG_LEN are silently dropped */
	spin_lock_irqsave(&pvt->mce_lock, flags);
	if (pvt->mce_count < MCE_LOG_LEN) {
		memcpy(&pvt->mce_entry[pvt->mce_count], mce, sizeof(*mce));
	spin_unlock_irqrestore(&pvt->mce_lock, flags);

	/*
	 * Advise mcelog that the error was handled.
	 * Per the original author's note, 0 is returned so the log entry
	 * is duplicated (mcelog keeps its own copy as well).
	 */
	return 0;
/*
 * i7core_probe	Probe for ONE instance of device to see if it is
 *
 *	Returns:
 *		0 for FOUND a device
 *		< 0 for error code
 *
 * NOTE(review): many lines of this function are elided in this excerpt
 * (local declarations of rc/i/sockets/channels/csrows/num_csrows, error
 * returns and goto labels, several closing braces).  Comments below only
 * describe the visible statements.
 */
static int __devinit i7core_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
	struct mem_ctl_info *mci;
	struct i7core_pvt *pvt;
	int num_channels = 0;
	int dev_idx = id->driver_data;

	/* Reject driver_data indexes we have no descriptor for */
	if (unlikely(dev_idx >= ARRAY_SIZE(i7core_devs)))

	/* get the pci devices we want to reserve for our use */
	rc = i7core_get_devices();
	if (unlikely(rc < 0))

	/* Find the highest populated socket (device 0 pdev present) */
	for (i = NUM_SOCKETS - 1; i > 0; i--)
		if (pci_devs[0].pdev[i]) {

	/* Sum channels and csrows over every socket for edac_mc_alloc() */
	for (i = 0; i < sockets; i++) {
		/* Check the number of active and not disabled channels */
		rc = i7core_get_active_channels(i, &channels, &csrows);
		if (unlikely(rc < 0))
		num_channels += channels;
		num_csrows += csrows;

	/* allocate a new MC control structure */
	mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
	if (unlikely(!mci)) {

	debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);

	mci->dev = &pdev->dev;	/* record ptr to the generic device */
	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));
	pvt->sockets = sockets;

	/*
	 * FIXME: how to handle RDDR3 at MCI level? It is possible to have
	 * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
	 */
	mci->mtype_cap = MEM_FLAG_DDR3;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i7core_edac.c";
	mci->mod_ver = I7CORE_REVISION;
	mci->ctl_name = i7core_devs[dev_idx].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = NULL;
	mci->mc_driver_sysfs_attributes = i7core_inj_attrs;
	/* Set the function pointer to an actual operation function */
	mci->edac_check = i7core_check_error;

	/* Store pci devices at mci for faster access */
	rc = mci_bind_devs(mci);
	if (unlikely(rc < 0))

	/* Get dimm basic config */
	for (i = 0; i < sockets; i++)
		get_dimm_config(mci, i);

	/* add this new MC control structure to EDAC's list of MCs */
	if (unlikely(edac_mc_add_mc(mci))) {
		debugf0("MC: " __FILE__
			": %s(): failed edac_mc_add_mc()\n", __func__);
		/* FIXME: perhaps some code should go here that disables error
		 * reporting if we just enabled it

	/* allocating generic PCI control info */
	i7core_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (unlikely(!i7core_pci)) {
		/* Not fatal: only EDAC PCI error reporting is unavailable.
		 * (The printk calls around these format strings are elided.) */
		"%s(): Unable to create PCI control\n",
		"%s(): PCI error report via EDAC not setup\n",

	/* Default error mask is any memory */
	pvt->inject.channel = 0;
	pvt->inject.dimm = -1;
	pvt->inject.rank = -1;
	pvt->inject.bank = -1;
	pvt->inject.page = -1;
	pvt->inject.col = -1;

	/* Registers on edac_mce in order to receive memory errors */
	pvt->edac_mce.priv = pvt;
	pvt->edac_mce.check_error = i7core_mce_check_error;
	spin_lock_init(&pvt->mce_lock);

	rc = edac_mce_register(&pvt->edac_mce);
	if (unlikely (rc < 0)) {
		debugf0("MC: " __FILE__
			": %s(): failed edac_mce_register()\n", __func__);

	i7core_printk(KERN_INFO, "Driver loaded.\n");

	/* error unwinding (label elided): release the reserved PCI devices */
	i7core_put_devices();
/*
 * i7core_remove	destructor for one instance of device
 *
 * NOTE(review): the opening brace, a probable NULL check on the value
 * returned by edac_mc_del_mc(), edac_mc_free(), and the closing brace are
 * elided in this excerpt.
 */
static void __devexit i7core_remove(struct pci_dev *pdev)
	struct mem_ctl_info *mci;
	struct i7core_pvt *pvt;

	debugf0(__FILE__ ": %s()\n", __func__);

	/* Tear down the generic EDAC PCI control created at probe time */
	edac_pci_release_generic_ctl(i7core_pci);

	/* Detach the MC instance registered by i7core_probe() */
	mci = edac_mc_del_mc(&pdev->dev);

	/* Unregisters on edac_mce in order to receive memory errors */
	pvt = mci->pvt_info;
	edac_mce_unregister(&pvt->edac_mce);

	/* retrieve references to resources, and free those resources */
	i7core_put_devices();
/* Export the PCI id table so the module autoloads on matching hardware */
MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);

/*
 * i7core_driver	pci_driver structure for this module
 *			(closing "};" elided in this excerpt)
 */
static struct pci_driver i7core_driver = {
	.name = "i7core_edac",
	.probe = i7core_probe,
	.remove = __devexit_p(i7core_remove),
	.id_table = i7core_pci_tbl,
/*
 * i7core_init		Module entry function
 *			Try to initialize this module for its devices
 *
 * NOTE(review): the opening brace, the declaration of 'pci_rc', and the
 * opstate initialization call are elided in this excerpt.
 */
static int __init i7core_init(void)
	debugf2("MC: " __FILE__ ": %s()\n", __func__);

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */

	pci_rc = pci_register_driver(&i7core_driver);

	/* Propagate a registration failure; otherwise report success */
	return (pci_rc < 0) ? pci_rc : 0;
/*
 * i7core_exit()	Module exit function
 *			Unregister the driver
 */
static void __exit i7core_exit(void)
	debugf2("MC: " __FILE__ ": %s()\n", __func__);
	pci_unregister_driver(&i7core_driver);
module_init(i7core_init);
module_exit(i7core_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
/* NOTE(review): the string-literal continuation of this macro is elided */
MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "

/* Let the operator choose polled vs. NMI error reporting at load time */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");