i7core_edac: Fix compilation on 32-bit arch
[pandora-kernel.git] / drivers / edac / i7core_edac.c
1 /* Intel i7 core/Nehalem Memory Controller kernel module
2  *
3  * This driver supports the memory controllers found on the Intel
4  * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
5  * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
6  * and Westmere-EP.
7  *
8  * This file may be distributed under the terms of the
9  * GNU General Public License version 2 only.
10  *
11  * Copyright (c) 2009-2010 by:
12  *       Mauro Carvalho Chehab <mchehab@redhat.com>
13  *
14  * Red Hat Inc. http://www.redhat.com
15  *
16  * Forked and adapted from the i5400_edac driver
17  *
18  * Based on the following public Intel datasheets:
19  * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
20  * Datasheet, Volume 2:
21  *      http://download.intel.com/design/processor/datashts/320835.pdf
22  * Intel Xeon Processor 5500 Series Datasheet Volume 2
23  *      http://www.intel.com/Assets/PDF/datasheet/321322.pdf
24  * also available at:
25  *      http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
26  */
27
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/pci.h>
31 #include <linux/pci_ids.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/dmi.h>
35 #include <linux/edac.h>
36 #include <linux/mmzone.h>
37 #include <linux/smp.h>
38 #include <asm/mce.h>
39 #include <asm/processor.h>
40 #include <asm/div64.h>
41
42 #include "edac_core.h"
43
44 /* Static vars */
45 static LIST_HEAD(i7core_edac_list);
46 static DEFINE_MUTEX(i7core_edac_lock);
47 static int probed;
48
49 static int use_pci_fixup;
50 module_param(use_pci_fixup, int, 0444);
51 MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
52 /*
53  * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
54  * registers start at bus 255, and are not reported by BIOS.
55  * We currently handle devices with only 2 sockets. To support more QPI
56  * (Quick Path Interconnect) sockets, just increment this number.
57  */
58 #define MAX_SOCKET_BUSES        2
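/*
 * Example (assuming the module was built as i7core_edac.ko), loading
 * with the hidden-bus fixup enabled:
 *      modprobe i7core_edac use_pci_fixup=1
 */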
59
60
61 /*
62  * Alter this version for the module when modifications are made
63  */
64 #define I7CORE_REVISION    " Ver: 1.0.0"
65 #define EDAC_MOD_STR      "i7core_edac"
66
67 /*
68  * Debug macros
69  */
70 #define i7core_printk(level, fmt, arg...)                       \
71         edac_printk(level, "i7core", fmt, ##arg)
72
73 #define i7core_mc_printk(mci, level, fmt, arg...)               \
74         edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
75
76 /*
77  * i7core Memory Controller Registers
78  */
79
80         /* OFFSETS for Device 0 Function 0 */
81
82 #define MC_CFG_CONTROL  0x90
83   #define MC_CFG_UNLOCK         0x02
84   #define MC_CFG_LOCK           0x00
85
86         /* OFFSETS for Device 3 Function 0 */
87
88 #define MC_CONTROL      0x48
89 #define MC_STATUS       0x4c
90 #define MC_MAX_DOD      0x64
91
92 /*
93  * OFFSETS for Device 3 Function 4, as indicated in the Xeon 5500 datasheet:
94  * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
95  */
96
97 #define MC_TEST_ERR_RCV1        0x60
98   #define DIMM2_COR_ERR(r)                      ((r) & 0x7fff)
99
100 #define MC_TEST_ERR_RCV0        0x64
101   #define DIMM1_COR_ERR(r)                      (((r) >> 16) & 0x7fff)
102   #define DIMM0_COR_ERR(r)                      ((r) & 0x7fff)
103
104 /* OFFSETS for Device 3 Function 2, as indicated in the Xeon 5500 datasheet */
105 #define MC_SSRCONTROL           0x48
106   #define SSR_MODE_DISABLE      0x00
107   #define SSR_MODE_ENABLE       0x01
108   #define SSR_MODE_MASK         0x03
109
110 #define MC_SCRUB_CONTROL        0x4c
111   #define STARTSCRUB            (1 << 24)
112   #define SCRUBINTERVAL_MASK    0xffffff
113
114 #define MC_COR_ECC_CNT_0        0x80
115 #define MC_COR_ECC_CNT_1        0x84
116 #define MC_COR_ECC_CNT_2        0x88
117 #define MC_COR_ECC_CNT_3        0x8c
118 #define MC_COR_ECC_CNT_4        0x90
119 #define MC_COR_ECC_CNT_5        0x94
120
121 #define DIMM_TOP_COR_ERR(r)                     (((r) >> 16) & 0x7fff)
122 #define DIMM_BOT_COR_ERR(r)                     ((r) & 0x7fff)
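/*
 * Each MC_COR_ECC_CNT_* register above packs two 15-bit correctable
 * error counters: bits 30:16 for the "top" DIMM and bits 14:0 for the
 * "bottom" DIMM, as extracted by the two macros above.
 */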
123
124
125         /* OFFSETS for Devices 4,5 and 6 Function 0 */
126
127 #define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
128   #define THREE_DIMMS_PRESENT           (1 << 24)
129   #define SINGLE_QUAD_RANK_PRESENT      (1 << 23)
130   #define QUAD_RANK_PRESENT             (1 << 22)
131   #define REGISTERED_DIMM               (1 << 15)
132
133 #define MC_CHANNEL_MAPPER       0x60
134   #define RDLCH(r, ch)          ((((r) >> (3 + ((ch) * 6))) & 0x07) - 1)
135   #define WRLCH(r, ch)          ((((r) >> ((ch) * 6)) & 0x07) - 1)
136
137 #define MC_CHANNEL_RANK_PRESENT 0x7c
138   #define RANK_PRESENT_MASK             0xffff
139
140 #define MC_CHANNEL_ADDR_MATCH   0xf0
141 #define MC_CHANNEL_ERROR_MASK   0xf8
142 #define MC_CHANNEL_ERROR_INJECT 0xfc
143   #define INJECT_ADDR_PARITY    0x10
144   #define INJECT_ECC            0x08
145   #define MASK_CACHELINE        0x06
146   #define MASK_FULL_CACHELINE   0x06
147   #define MASK_MSB32_CACHELINE  0x04
148   #define MASK_LSB32_CACHELINE  0x02
149   #define NO_MASK_CACHELINE     0x00
150   #define REPEAT_EN             0x01
151
152         /* OFFSETS for Devices 4,5 and 6 Function 1 */
153
154 #define MC_DOD_CH_DIMM0         0x48
155 #define MC_DOD_CH_DIMM1         0x4c
156 #define MC_DOD_CH_DIMM2         0x50
157   #define RANKOFFSET_MASK       ((1 << 12) | (1 << 11) | (1 << 10))
158   #define RANKOFFSET(x)         (((x) & RANKOFFSET_MASK) >> 10)
159   #define DIMM_PRESENT_MASK     (1 << 9)
160   #define DIMM_PRESENT(x)       (((x) & DIMM_PRESENT_MASK) >> 9)
161   #define MC_DOD_NUMBANK_MASK           ((1 << 8) | (1 << 7))
162   #define MC_DOD_NUMBANK(x)             (((x) & MC_DOD_NUMBANK_MASK) >> 7)
163   #define MC_DOD_NUMRANK_MASK           ((1 << 6) | (1 << 5))
164   #define MC_DOD_NUMRANK(x)             (((x) & MC_DOD_NUMRANK_MASK) >> 5)
165   #define MC_DOD_NUMROW_MASK            ((1 << 4) | (1 << 3) | (1 << 2))
166   #define MC_DOD_NUMROW(x)              (((x) & MC_DOD_NUMROW_MASK) >> 2)
167   #define MC_DOD_NUMCOL_MASK            3
168   #define MC_DOD_NUMCOL(x)              ((x) & MC_DOD_NUMCOL_MASK)
169
170 #define MC_RANK_PRESENT         0x7c
171
172 #define MC_SAG_CH_0     0x80
173 #define MC_SAG_CH_1     0x84
174 #define MC_SAG_CH_2     0x88
175 #define MC_SAG_CH_3     0x8c
176 #define MC_SAG_CH_4     0x90
177 #define MC_SAG_CH_5     0x94
178 #define MC_SAG_CH_6     0x98
179 #define MC_SAG_CH_7     0x9c
180
181 #define MC_RIR_LIMIT_CH_0       0x40
182 #define MC_RIR_LIMIT_CH_1       0x44
183 #define MC_RIR_LIMIT_CH_2       0x48
184 #define MC_RIR_LIMIT_CH_3       0x4C
185 #define MC_RIR_LIMIT_CH_4       0x50
186 #define MC_RIR_LIMIT_CH_5       0x54
187 #define MC_RIR_LIMIT_CH_6       0x58
188 #define MC_RIR_LIMIT_CH_7       0x5C
189 #define MC_RIR_LIMIT_MASK       ((1 << 10) - 1)
190
191 #define MC_RIR_WAY_CH           0x80
192   #define MC_RIR_WAY_OFFSET_MASK        (((1 << 14) - 1) & ~0x7)
193   #define MC_RIR_WAY_RANK_MASK          0x7
194
195 /*
196  * i7core structs
197  */
198
199 #define NUM_CHANS 3
200 #define MAX_DIMMS 3             /* Max DIMMS per channel */
201 #define MAX_MCR_FUNC  4
202 #define MAX_CHAN_FUNC 3
203
204 struct i7core_info {
205         u32     mc_control;
206         u32     mc_status;
207         u32     max_dod;
208         u32     ch_map;
209 };
210
211
212 struct i7core_inject {
213         int     enable;
214
215         u32     section;
216         u32     type;
217         u32     eccmask;
218
219         /* Error address mask */
220         int channel, dimm, rank, bank, page, col;
221 };
222
223 struct i7core_channel {
224         u32             ranks;
225         u32             dimms;
226 };
227
228 struct pci_id_descr {
229         int                     dev;
230         int                     func;
231         int                     dev_id;
232         int                     optional;
233 };
234
235 struct pci_id_table {
236         const struct pci_id_descr       *descr;
237         int                             n_devs;
238 };
239
240 struct i7core_dev {
241         struct list_head        list;
242         u8                      socket;
243         struct pci_dev          **pdev;
244         int                     n_devs;
245         struct mem_ctl_info     *mci;
246 };
247
248 struct i7core_pvt {
249         struct pci_dev  *pci_noncore;
250         struct pci_dev  *pci_mcr[MAX_MCR_FUNC + 1];
251         struct pci_dev  *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];
252
253         struct i7core_dev *i7core_dev;
254
255         struct i7core_info      info;
256         struct i7core_inject    inject;
257         struct i7core_channel   channel[NUM_CHANS];
258
259         int             ce_count_available;
260         int             csrow_map[NUM_CHANS][MAX_DIMMS];
261
262                         /* ECC corrected errors counts per udimm */
263         unsigned long   udimm_ce_count[MAX_DIMMS];
264         int             udimm_last_ce_count[MAX_DIMMS];
265                         /* ECC corrected errors counts per rdimm */
266         unsigned long   rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
267         int             rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];
268
269         bool            is_registered, enable_scrub;
270
271         /* Fifo double buffers */
272         struct mce              mce_entry[MCE_LOG_LEN];
273         struct mce              mce_outentry[MCE_LOG_LEN];
274
275         /* Fifo in/out counters */
276         unsigned                mce_in, mce_out;
277
278         /* Count indicator to show errors that were not retrieved */
279         unsigned                mce_overrun;
280
281         /* DCLK Frequency used for computing scrub rate */
282         int                     dclk_freq;
283
284         /* Struct to control EDAC polling */
285         struct edac_pci_ctl_info *i7core_pci;
286 };
287
288 #define PCI_DESCR(device, function, device_id)  \
289         .dev = (device),                        \
290         .func = (function),                     \
291         .dev_id = (device_id)
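/*
 * For instance, { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) } expands
 * to { .dev = 3, .func = 0, .dev_id = PCI_DEVICE_ID_INTEL_I7_MCR },
 * i.e. a designated initializer for a struct pci_id_descr entry.
 */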
292
293 static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
294                 /* Memory controller */
295         { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR)     },
296         { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD)  },
297                         /* Exists only for RDIMM */
298         { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1  },
299         { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
300
301                 /* Channel 0 */
302         { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
303         { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
304         { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
305         { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC)   },
306
307                 /* Channel 1 */
308         { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
309         { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
310         { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
311         { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC)   },
312
313                 /* Channel 2 */
314         { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
315         { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
316         { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
317         { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC)   },
318
319                 /* Generic Non-core registers */
320         /*
321          * This is the PCI device on i7core and on Xeon 35xx (8086:2c41).
322          * On Xeon 55xx, however, it has a different ID (8086:2c40). So,
323          * the probing code needs to test for the other address in case
324          * this one fails.
325          */
326         { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE)  },
327
328 };
329
330 static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
331         { PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR)         },
332         { PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD)      },
333         { PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST)     },
334
335         { PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
336         { PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
337         { PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
338         { PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC)   },
339
340         { PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
341         { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
342         { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
343         { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC)   },
344
345         /*
346          * This PCI device has an alternate address on some
347          * processors, like the Core i7 860.
348          */
349         { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE)     },
350 };
351
352 static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
353                 /* Memory controller */
354         { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2)     },
355         { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2)  },
356                         /* Exists only for RDIMM */
357         { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1  },
358         { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },
359
360                 /* Channel 0 */
361         { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
362         { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
363         { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
364         { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2)   },
365
366                 /* Channel 1 */
367         { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
368         { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
369         { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
370         { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2)   },
371
372                 /* Channel 2 */
373         { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
374         { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
375         { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
376         { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2)   },
377
378                 /* Generic Non-core registers */
379         { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2)  },
380
381 };
382
383 #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
384 static const struct pci_id_table pci_dev_table[] = {
385         PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
386         PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
387         PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
388         {0,}                    /* 0 terminated list. */
389 };
390
391 /*
392  *      pci_device_id   table for which devices we are looking for
393  */
394 static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
395         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
396         {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
397         {0,}                    /* 0 terminated list. */
398 };
399
400 /****************************************************************************
401                         Ancillary status routines
402  ****************************************************************************/
403
404         /* MC_CONTROL bits */
405 #define CH_ACTIVE(pvt, ch)      ((pvt)->info.mc_control & (1 << (8 + (ch))))
406 #define ECCx8(pvt)              ((pvt)->info.mc_control & (1 << 1))
407
408         /* MC_STATUS bits */
409 #define ECC_ENABLED(pvt)        ((pvt)->info.mc_status & (1 << 4))
410 #define CH_DISABLED(pvt, ch)    ((pvt)->info.mc_status & (1 << (ch)))
411
412         /* MC_MAX_DOD read functions */
413 static inline int numdimms(u32 dimms)
414 {
415         return (dimms & 0x3) + 1;
416 }
417
418 static inline int numrank(u32 rank)
419 {
420         static int ranks[4] = { 1, 2, 4, -EINVAL };
421
422         return ranks[rank & 0x3];
423 }
424
425 static inline int numbank(u32 bank)
426 {
427         static int banks[4] = { 4, 8, 16, -EINVAL };
428
429         return banks[bank & 0x3];
430 }
431
432 static inline int numrow(u32 row)
433 {
434         static int rows[8] = {
435                 1 << 12, 1 << 13, 1 << 14, 1 << 15,
436                 1 << 16, -EINVAL, -EINVAL, -EINVAL,
437         };
438
439         return rows[row & 0x7];
440 }
441
442 static inline int numcol(u32 col)
443 {
444         static int cols[4] = {
445                 1 << 10, 1 << 11, 1 << 12, -EINVAL,
446         };
447         return cols[col & 0x3];
448 }
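/*
 * The helpers above decode the 2- and 3-bit fields of MC_MAX_DOD and
 * MC_DOD_CH_DIMMn into DIMM/rank/bank/row/column counts; -EINVAL marks
 * encodings that the datasheet leaves reserved.
 */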
449
450 static struct i7core_dev *get_i7core_dev(u8 socket)
451 {
452         struct i7core_dev *i7core_dev;
453
454         list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
455                 if (i7core_dev->socket == socket)
456                         return i7core_dev;
457         }
458
459         return NULL;
460 }
461
462 static struct i7core_dev *alloc_i7core_dev(u8 socket,
463                                            const struct pci_id_table *table)
464 {
465         struct i7core_dev *i7core_dev;
466
467         i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
468         if (!i7core_dev)
469                 return NULL;
470
471         i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
472                                    GFP_KERNEL);
473         if (!i7core_dev->pdev) {
474                 kfree(i7core_dev);
475                 return NULL;
476         }
477
478         i7core_dev->socket = socket;
479         i7core_dev->n_devs = table->n_devs;
480         list_add_tail(&i7core_dev->list, &i7core_edac_list);
481
482         return i7core_dev;
483 }
484
485 static void free_i7core_dev(struct i7core_dev *i7core_dev)
486 {
487         list_del(&i7core_dev->list);
488         kfree(i7core_dev->pdev);
489         kfree(i7core_dev);
490 }
491
492 /****************************************************************************
493                         Memory check routines
494  ****************************************************************************/
495 static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
496                                           unsigned func)
497 {
498         struct i7core_dev *i7core_dev = get_i7core_dev(socket);
499         int i;
500
501         if (!i7core_dev)
502                 return NULL;
503
504         for (i = 0; i < i7core_dev->n_devs; i++) {
505                 if (!i7core_dev->pdev[i])
506                         continue;
507
508                 if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
509                     PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
510                         return i7core_dev->pdev[i];
511                 }
512         }
513
514         return NULL;
515 }
516
517 /**
518  * i7core_get_active_channels() - gets the number of channels and csrows
519  * @socket:     Quick Path Interconnect socket
520  * @channels:   Number of channels that will be returned
521  * @csrows:     Number of csrows found
522  *
523  * Since the EDAC core needs to know in advance the number of available
524  * channels and csrows in order to allocate memory for them, two similar
525  * steps are needed. The first step, implemented by this function,
526  * counts the csrows/channels present on one socket; this is used to
527  * properly size the mci components.
528  *
529  * It should be noted that none of the currently available datasheets
530  * explain, or even mention, how csrows are seen by the memory
531  * controller, so we need to fake a csrow description. Hence, this
532  * driver maps one DIMM to one csrow.
533  */
534 static int i7core_get_active_channels(const u8 socket, unsigned *channels,
535                                       unsigned *csrows)
536 {
537         struct pci_dev *pdev = NULL;
538         int i, j;
539         u32 status, control;
540
541         *channels = 0;
542         *csrows = 0;
543
544         pdev = get_pdev_slot_func(socket, 3, 0);
545         if (!pdev) {
546                 i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
547                               socket);
548                 return -ENODEV;
549         }
550
551         /* Device 3 function 0 reads */
552         pci_read_config_dword(pdev, MC_STATUS, &status);
553         pci_read_config_dword(pdev, MC_CONTROL, &control);
554
555         for (i = 0; i < NUM_CHANS; i++) {
556                 u32 dimm_dod[3];
557                 /* Check if the channel is active */
558                 if (!(control & (1 << (8 + i))))
559                         continue;
560
561                 /* Check if the channel is disabled */
562                 if (status & (1 << i))
563                         continue;
564
565                 pdev = get_pdev_slot_func(socket, i + 4, 1);
566                 if (!pdev) {
567                         i7core_printk(KERN_ERR, "Couldn't find socket %d "
568                                                 "fn %d.%d!!!\n",
569                                                 socket, i + 4, 1);
570                         return -ENODEV;
571                 }
572                 /* Devices 4-6 function 1 */
573                 pci_read_config_dword(pdev,
574                                 MC_DOD_CH_DIMM0, &dimm_dod[0]);
575                 pci_read_config_dword(pdev,
576                                 MC_DOD_CH_DIMM1, &dimm_dod[1]);
577                 pci_read_config_dword(pdev,
578                                 MC_DOD_CH_DIMM2, &dimm_dod[2]);
579
580                 (*channels)++;
581
582                 for (j = 0; j < 3; j++) {
583                         if (!DIMM_PRESENT(dimm_dod[j]))
584                                 continue;
585                         (*csrows)++;
586                 }
587         }
588
589         debugf0("Number of active channels on socket %d: %d\n",
590                 socket, *channels);
591
592         return 0;
593 }
594
595 static int get_dimm_config(const struct mem_ctl_info *mci)
596 {
597         struct i7core_pvt *pvt = mci->pvt_info;
598         struct csrow_info *csr;
599         struct pci_dev *pdev;
600         int i, j;
601         int csrow = 0;
602         unsigned long last_page = 0;
603         enum edac_type mode;
604         enum mem_type mtype;
605
606         /* Get data from the MC register, function 0 */
607         pdev = pvt->pci_mcr[0];
608         if (!pdev)
609                 return -ENODEV;
610
611         /* Device 3 function 0 reads */
612         pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
613         pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
614         pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
615         pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
616
617         debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
618                 pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
619                 pvt->info.max_dod, pvt->info.ch_map);
620
621         if (ECC_ENABLED(pvt)) {
622                 debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
623                 if (ECCx8(pvt))
624                         mode = EDAC_S8ECD8ED;
625                 else
626                         mode = EDAC_S4ECD4ED;
627         } else {
628                 debugf0("ECC disabled\n");
629                 mode = EDAC_NONE;
630         }
631
632         /* FIXME: need to handle the error codes */
633         debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
634                 "x%x x 0x%x\n",
635                 numdimms(pvt->info.max_dod),
636                 numrank(pvt->info.max_dod >> 2),
637                 numbank(pvt->info.max_dod >> 4),
638                 numrow(pvt->info.max_dod >> 6),
639                 numcol(pvt->info.max_dod >> 9));
640
641         for (i = 0; i < NUM_CHANS; i++) {
642                 u32 data, dimm_dod[3], value[8];
643
644                 if (!pvt->pci_ch[i][0])
645                         continue;
646
647                 if (!CH_ACTIVE(pvt, i)) {
648                         debugf0("Channel %i is not active\n", i);
649                         continue;
650                 }
651                 if (CH_DISABLED(pvt, i)) {
652                         debugf0("Channel %i is disabled\n", i);
653                         continue;
654                 }
655
656                 /* Devices 4-6 function 0 */
657                 pci_read_config_dword(pvt->pci_ch[i][0],
658                                 MC_CHANNEL_DIMM_INIT_PARAMS, &data);
659
660                 pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
661                                                 4 : 2;
662
663                 if (data & REGISTERED_DIMM)
664                         mtype = MEM_RDDR3;
665                 else
666                         mtype = MEM_DDR3;
667 #if 0
668                 if (data & THREE_DIMMS_PRESENT)
669                         pvt->channel[i].dimms = 3;
670                 else if (data & SINGLE_QUAD_RANK_PRESENT)
671                         pvt->channel[i].dimms = 1;
672                 else
673                         pvt->channel[i].dimms = 2;
674 #endif
675
676                 /* Devices 4-6 function 1 */
677                 pci_read_config_dword(pvt->pci_ch[i][1],
678                                 MC_DOD_CH_DIMM0, &dimm_dod[0]);
679                 pci_read_config_dword(pvt->pci_ch[i][1],
680                                 MC_DOD_CH_DIMM1, &dimm_dod[1]);
681                 pci_read_config_dword(pvt->pci_ch[i][1],
682                                 MC_DOD_CH_DIMM2, &dimm_dod[2]);
683
684                 debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
685                         "%d ranks, %cDIMMs\n",
686                         i,
687                         RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
688                         data,
689                         pvt->channel[i].ranks,
690                         (data & REGISTERED_DIMM) ? 'R' : 'U');
691
692                 for (j = 0; j < 3; j++) {
693                         u32 banks, ranks, rows, cols;
694                         u32 size, npages;
695
696                         if (!DIMM_PRESENT(dimm_dod[j]))
697                                 continue;
698
699                         banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
700                         ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
701                         rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
702                         cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
703
704                         /* DDR3 has 8 I/O banks */
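                        /*
                         * rows * cols * banks * ranks counts addressable
                         * locations; each location is 64 bits (8 bytes)
                         * wide, so ">> (20 - 3)" multiplies by 8 bytes
                         * while converting bytes to megabytes.
                         */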
705                         size = (rows * cols * banks * ranks) >> (20 - 3);
706
707                         pvt->channel[i].dimms++;
708
709                         debugf0("\tdimm %d %d MB offset: %x, "
710                                 "bank: %d, rank: %d, row: %#x, col: %#x\n",
711                                 j, size,
712                                 RANKOFFSET(dimm_dod[j]),
713                                 banks, ranks, rows, cols);
714
715                         npages = MiB_TO_PAGES(size);
716
717                         csr = &mci->csrows[csrow];
718                         csr->first_page = last_page + 1;
719                         last_page += npages;
720                         csr->last_page = last_page;
721                         csr->nr_pages = npages;
722
723                         csr->page_mask = 0;
724                         csr->grain = 8;
725                         csr->csrow_idx = csrow;
726                         csr->nr_channels = 1;
727
728                         csr->channels[0].chan_idx = i;
729                         csr->channels[0].ce_count = 0;
730
731                         pvt->csrow_map[i][j] = csrow;
732
733                         switch (banks) {
734                         case 4:
735                                 csr->dtype = DEV_X4;
736                                 break;
737                         case 8:
738                                 csr->dtype = DEV_X8;
739                                 break;
740                         case 16:
741                                 csr->dtype = DEV_X16;
742                                 break;
743                         default:
744                                 csr->dtype = DEV_UNKNOWN;
745                         }
746
747                         csr->edac_mode = mode;
748                         csr->mtype = mtype;
749
750                         csrow++;
751                 }
752
753                 pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
754                 pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
755                 pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
756                 pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
757                 pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
758                 pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
759                 pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
760                 pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
761                 debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
762                 for (j = 0; j < 8; j++)
763                         debugf1("\t\t%#x\t%#x\t%#x\n",
764                                 (value[j] >> 27) & 0x1,
765                                 (value[j] >> 24) & 0x7,
766                                 (value[j] & ((1 << 24) - 1)));
767         }
768
769         return 0;
770 }
771
772 /****************************************************************************
773                         Error insertion routines
774  ****************************************************************************/
775
776 /* The i7core has independent error injection features per channel.
777    However, to keep the code simpler, we don't allow enabling error
778    injection on more than one channel at a time.
779    Also, since a change to an inject parameter is applied only at enable
780    time, we disable error injection on every write to the sysfs nodes
781    that control the error code injection.
782  */
783 static int disable_inject(const struct mem_ctl_info *mci)
784 {
785         struct i7core_pvt *pvt = mci->pvt_info;
786
787         pvt->inject.enable = 0;
788
789         if (!pvt->pci_ch[pvt->inject.channel][0])
790                 return -ENODEV;
791
792         pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
793                                 MC_CHANNEL_ERROR_INJECT, 0);
794
795         return 0;
796 }
797
798 /*
799  * i7core inject.section
800  *
801  *      accept and store error injection inject.section value
802  *      bit 0 - refers to the lower 32-byte half cacheline
803  *      bit 1 - refers to the upper 32-byte half cacheline
804  */
805 static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
806                                            const char *data, size_t count)
807 {
808         struct i7core_pvt *pvt = mci->pvt_info;
809         unsigned long value;
810         int rc;
811
812         if (pvt->inject.enable)
813                 disable_inject(mci);
814
815         rc = strict_strtoul(data, 10, &value);
816         if ((rc < 0) || (value > 3))
817                 return -EIO;
818
819         pvt->inject.section = (u32) value;
820         return count;
821 }
822
823 static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
824                                               char *data)
825 {
826         struct i7core_pvt *pvt = mci->pvt_info;
827         return sprintf(data, "0x%08x\n", pvt->inject.section);
828 }
829
830 /*
831  * i7core inject.type
832  *
833  *      accept and store error injection inject.type value
834  *      bit 0 - repeat enable - Enable error repetition
835  *      bit 1 - inject ECC error
836  *      bit 2 - inject parity error
837  */
838 static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
839                                         const char *data, size_t count)
840 {
841         struct i7core_pvt *pvt = mci->pvt_info;
842         unsigned long value;
843         int rc;
844
845         if (pvt->inject.enable)
846                 disable_inject(mci);
847
848         rc = strict_strtoul(data, 10, &value);
849         if ((rc < 0) || (value > 7))
850                 return -EIO;
851
852         pvt->inject.type = (u32) value;
853         return count;
854 }
855
856 static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
857                                               char *data)
858 {
859         struct i7core_pvt *pvt = mci->pvt_info;
860         return sprintf(data, "0x%08x\n", pvt->inject.type);
861 }
862
863 /*
864  * i7core_inject_eccmask_store
865  *
866  * The type of error (UE/CE) will depend on the inject.eccmask value:
867  *   Any bits set to a 1 will flip the corresponding ECC bit
868  *   Correctable errors can be injected by flipping 1 bit or the bits within
869  *   a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
870  *   23:16 and 31:24). Flipping bits in two symbol pairs will cause an
871  *   uncorrectable error to be injected.
872  */
873 static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
874                                         const char *data, size_t count)
875 {
876         struct i7core_pvt *pvt = mci->pvt_info;
877         unsigned long value;
878         int rc;
879
880         if (pvt->inject.enable)
881                 disable_inject(mci);
882
883         rc = strict_strtoul(data, 10, &value);
884         if (rc < 0)
885                 return -EIO;
886
887         pvt->inject.eccmask = (u32) value;
888         return count;
889 }
890
891 static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
892                                               char *data)
893 {
894         struct i7core_pvt *pvt = mci->pvt_info;
895         return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
896 }
897
898 /*
899  * i7core_addrmatch
900  *
901  * The type of error (UE/CE) will depend on the inject.eccmask value:
902  *   Any bits set to a 1 will flip the corresponding ECC bit
903  *   Correctable errors can be injected by flipping 1 bit or the bits within
904  *   a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
905  *   23:16 and 31:24). Flipping bits in two symbol pairs will cause an
906  *   uncorrectable error to be injected.
907  */
908
909 #define DECLARE_ADDR_MATCH(param, limit)                        \
910 static ssize_t i7core_inject_store_##param(                     \
911                 struct mem_ctl_info *mci,                       \
912                 const char *data, size_t count)                 \
913 {                                                               \
914         struct i7core_pvt *pvt;                                 \
915         long value;                                             \
916         int rc;                                                 \
917                                                                 \
918         debugf1("%s()\n", __func__);                            \
919         pvt = mci->pvt_info;                                    \
920                                                                 \
921         if (pvt->inject.enable)                                 \
922                 disable_inject(mci);                            \
923                                                                 \
924         if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
925                 value = -1;                                     \
926         else {                                                  \
927                 rc = strict_strtoul(data, 10, &value);          \
928                 if ((rc < 0) || (value >= limit))               \
929                         return -EIO;                            \
930         }                                                       \
931                                                                 \
932         pvt->inject.param = value;                              \
933                                                                 \
934         return count;                                           \
935 }                                                               \
936                                                                 \
937 static ssize_t i7core_inject_show_##param(                      \
938                 struct mem_ctl_info *mci,                       \
939                 char *data)                                     \
940 {                                                               \
941         struct i7core_pvt *pvt;                                 \
942                                                                 \
943         pvt = mci->pvt_info;                                    \
944         debugf1("%s() pvt=%p\n", __func__, pvt);                \
945         if (pvt->inject.param < 0)                              \
946                 return sprintf(data, "any\n");                  \
947         else                                                    \
948                 return sprintf(data, "%d\n", pvt->inject.param);\
949 }
950
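/*
 * For example, DECLARE_ADDR_MATCH(channel, 3) generates
 * i7core_inject_store_channel() and i7core_inject_show_channel(),
 * which operate on pvt->inject.channel with an upper limit of 3.
 */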
951 #define ATTR_ADDR_MATCH(param)                                  \
952         {                                                       \
953                 .attr = {                                       \
954                         .name = #param,                         \
955                         .mode = (S_IRUGO | S_IWUSR)             \
956                 },                                              \
957                 .show  = i7core_inject_show_##param,            \
958                 .store = i7core_inject_store_##param,           \
959         }
960
961 DECLARE_ADDR_MATCH(channel, 3);
962 DECLARE_ADDR_MATCH(dimm, 3);
963 DECLARE_ADDR_MATCH(rank, 4);
964 DECLARE_ADDR_MATCH(bank, 32);
965 DECLARE_ADDR_MATCH(page, 0x10000);
966 DECLARE_ADDR_MATCH(col, 0x4000);
967
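/*
 * Write @val to config register @where and read it back to confirm,
 * retrying up to 10 times with a 100ms delay between attempts.
 * Returns 0 on success, or -EINVAL if the value never reads back.
 */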
968 static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
969 {
970         u32 read;
971         int count;
972
973         debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
974                 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
975                 where, val);
976
977         for (count = 0; count < 10; count++) {
978                 if (count)
979                         msleep(100);
980                 pci_write_config_dword(dev, where, val);
981                 pci_read_config_dword(dev, where, &read);
982
983                 if (read == val)
984                         return 0;
985         }
986
987         i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
988                 "write=%08x. Read=%08x\n",
989                 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
990                 where, val, read);
991
992         return -EINVAL;
993 }
994
995 /*
996  * This routine prepares the Memory Controller for error injection.
997  * The error will be injected when some process tries to write to the
998  * memory that matches the given criteria.
999  * The criteria can be set in terms of a mask where dimm, rank, bank, page
1000  * and col can be specified.
1001  * A -1 value for any of the mask items will make the MCU ignore
1002  * that matching criterion for error injection.
1003  *
1004  * It should be noted that the error will only happen after a write
1005  * operation to memory that matches the condition. If REPEAT_EN is not
1006  * set in the inject mask, it will produce just one error. Otherwise,
1007  * it will repeat until the injectmask is cleared.
1008  *
1009  * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD
1010  *    is reliable enough to check if the MC is using the
1011  *    three channels. However, this is not clear at the datasheet.
1012  */
1013 static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
1014                                        const char *data, size_t count)
1015 {
1016         struct i7core_pvt *pvt = mci->pvt_info;
1017         u32 injectmask;
1018         u64 mask = 0;
1019         int  rc;
1020         long enable;
1021
1022         if (!pvt->pci_ch[pvt->inject.channel][0])
1023                 return 0;
1024
1025         rc = strict_strtoul(data, 10, &enable);
1026         if (rc < 0)
1027                 return 0;
1028
1029         if (enable) {
1030                 pvt->inject.enable = 1;
1031         } else {
1032                 disable_inject(mci);
1033                 return count;
1034         }
1035
1036         /* Sets pvt->inject.dimm mask */
1037         if (pvt->inject.dimm < 0)
1038                 mask |= 1LL << 41;
1039         else {
1040                 if (pvt->channel[pvt->inject.channel].dimms > 2)
1041                         mask |= (pvt->inject.dimm & 0x3LL) << 35;
1042                 else
1043                         mask |= (pvt->inject.dimm & 0x1LL) << 36;
1044         }
1045
1046         /* Sets pvt->inject.rank mask */
1047         if (pvt->inject.rank < 0)
1048                 mask |= 1LL << 40;
1049         else {
1050                 if (pvt->channel[pvt->inject.channel].dimms > 2)
1051                         mask |= (pvt->inject.rank & 0x1LL) << 34;
1052                 else
1053                         mask |= (pvt->inject.rank & 0x3LL) << 34;
1054         }
1055
1056         /* Sets pvt->inject.bank mask */
1057         if (pvt->inject.bank < 0)
1058                 mask |= 1LL << 39;
1059         else
1060                 mask |= (pvt->inject.bank & 0x1fLL) << 30;
1061
1062         /* Sets pvt->inject.page mask */
1063         if (pvt->inject.page < 0)
1064                 mask |= 1LL << 38;
1065         else
1066                 mask |= (pvt->inject.page & 0xffff) << 14;
1067
1068         /* Sets pvt->inject.column mask */
1069         if (pvt->inject.col < 0)
1070                 mask |= 1LL << 37;
1071         else
1072                 mask |= (pvt->inject.col & 0x3fff);
1073
1074         /*
1075          * bit    0: REPEAT_EN
1076          * bits 1-2: MASK_HALF_CACHELINE
1077          * bit    3: INJECT_ECC
1078          * bit    4: INJECT_ADDR_PARITY
1079          */
1080
1081         injectmask = (pvt->inject.type & 1) |
1082                      (pvt->inject.section & 0x3) << 1 |
1083                      (pvt->inject.type & 0x6) << (3 - 1);
1084
1085         /* Unlock writes to registers - this register is write only */
1086         pci_write_config_dword(pvt->pci_noncore,
1087                                MC_CFG_CONTROL, 0x2);
1088
1089         write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1090                                MC_CHANNEL_ADDR_MATCH, mask);
1091         write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1092                                MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);
1093
1094         write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1095                                MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);
1096
1097         write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1098                                MC_CHANNEL_ERROR_INJECT, injectmask);
1099
1100         /*
1101          * This is something undocumented, based on my tests.
1102          * Without writing 8 to this register, errors aren't injected. Not sure
1103          * why.
1104          */
1105         pci_write_config_dword(pvt->pci_noncore,
1106                                MC_CFG_CONTROL, 8);
1107
1108         debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
1109                 " inject 0x%08x\n",
1110                 mask, pvt->inject.eccmask, injectmask);
1111
1112
1113         return count;
1114 }
1115
1116 static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
1117                                         char *data)
1118 {
1119         struct i7core_pvt *pvt = mci->pvt_info;
1120         u32 injectmask;
1121
1122         if (!pvt->pci_ch[pvt->inject.channel][0])
1123                 return 0;
1124
1125         pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
1126                                MC_CHANNEL_ERROR_INJECT, &injectmask);
1127
1128         debugf0("Inject error read: 0x%08x\n", injectmask);
1129
1130         if (injectmask & 0x0c)
1131                 pvt->inject.enable = 1;
1132
1133         return sprintf(data, "%d\n", pvt->inject.enable);
1134 }
1135
1136 #define DECLARE_COUNTER(param)                                  \
1137 static ssize_t i7core_show_counter_##param(                     \
1138                 struct mem_ctl_info *mci,                       \
1139                 char *data)                                     \
1140 {                                                               \
1141         struct i7core_pvt *pvt = mci->pvt_info;                 \
1142                                                                 \
1143         debugf1("%s() \n", __func__);                           \
1144         if (!pvt->ce_count_available || (pvt->is_registered))   \
1145                 return sprintf(data, "data unavailable\n");     \
1146         return sprintf(data, "%lu\n",                           \
1147                         pvt->udimm_ce_count[param]);            \
1148 }
1149
1150 #define ATTR_COUNTER(param)                                     \
1151         {                                                       \
1152                 .attr = {                                       \
1153                         .name = __stringify(udimm##param),      \
1154                         .mode = (S_IRUGO | S_IWUSR)             \
1155                 },                                              \
1156                 .show  = i7core_show_counter_##param            \
1157         }
1158
1159 DECLARE_COUNTER(0);
1160 DECLARE_COUNTER(1);
1161 DECLARE_COUNTER(2);
1162
1163 /*
1164  * Sysfs struct
1165  */
1166
1167 static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
1168         ATTR_ADDR_MATCH(channel),
1169         ATTR_ADDR_MATCH(dimm),
1170         ATTR_ADDR_MATCH(rank),
1171         ATTR_ADDR_MATCH(bank),
1172         ATTR_ADDR_MATCH(page),
1173         ATTR_ADDR_MATCH(col),
1174         { } /* End of list */
1175 };
1176
1177 static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
1178         .name  = "inject_addrmatch",
1179         .mcidev_attr = i7core_addrmatch_attrs,
1180 };
1181
1182 static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
1183         ATTR_COUNTER(0),
1184         ATTR_COUNTER(1),
1185         ATTR_COUNTER(2),
1186         { .attr = { .name = NULL } }
1187 };
1188
1189 static const struct mcidev_sysfs_group i7core_udimm_counters = {
1190         .name  = "all_channel_counts",
1191         .mcidev_attr = i7core_udimm_counters_attrs,
1192 };
1193
1194 static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
1195         {
1196                 .attr = {
1197                         .name = "inject_section",
1198                         .mode = (S_IRUGO | S_IWUSR)
1199                 },
1200                 .show  = i7core_inject_section_show,
1201                 .store = i7core_inject_section_store,
1202         }, {
1203                 .attr = {
1204                         .name = "inject_type",
1205                         .mode = (S_IRUGO | S_IWUSR)
1206                 },
1207                 .show  = i7core_inject_type_show,
1208                 .store = i7core_inject_type_store,
1209         }, {
1210                 .attr = {
1211                         .name = "inject_eccmask",
1212                         .mode = (S_IRUGO | S_IWUSR)
1213                 },
1214                 .show  = i7core_inject_eccmask_show,
1215                 .store = i7core_inject_eccmask_store,
1216         }, {
1217                 .grp = &i7core_inject_addrmatch,
1218         }, {
1219                 .attr = {
1220                         .name = "inject_enable",
1221                         .mode = (S_IRUGO | S_IWUSR)
1222                 },
1223                 .show  = i7core_inject_enable_show,
1224                 .store = i7core_inject_enable_store,
1225         },
1226         { }     /* End of list */
1227 };
1228
1229 static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
1230         {
1231                 .attr = {
1232                         .name = "inject_section",
1233                         .mode = (S_IRUGO | S_IWUSR)
1234                 },
1235                 .show  = i7core_inject_section_show,
1236                 .store = i7core_inject_section_store,
1237         }, {
1238                 .attr = {
1239                         .name = "inject_type",
1240                         .mode = (S_IRUGO | S_IWUSR)
1241                 },
1242                 .show  = i7core_inject_type_show,
1243                 .store = i7core_inject_type_store,
1244         }, {
1245                 .attr = {
1246                         .name = "inject_eccmask",
1247                         .mode = (S_IRUGO | S_IWUSR)
1248                 },
1249                 .show  = i7core_inject_eccmask_show,
1250                 .store = i7core_inject_eccmask_store,
1251         }, {
1252                 .grp = &i7core_inject_addrmatch,
1253         }, {
1254                 .attr = {
1255                         .name = "inject_enable",
1256                         .mode = (S_IRUGO | S_IWUSR)
1257                 },
1258                 .show  = i7core_inject_enable_show,
1259                 .store = i7core_inject_enable_store,
1260         }, {
1261                 .grp = &i7core_udimm_counters,
1262         },
1263         { }     /* End of list */
1264 };
1265
1266 /****************************************************************************
1267         Device initialization routines: put/get, init/exit
1268  ****************************************************************************/
1269
1270 /*
1271  *      i7core_put_all_devices  'put' all the devices that we have
1272  *                              reserved via 'get'
1273  */
1274 static void i7core_put_devices(struct i7core_dev *i7core_dev)
1275 {
1276         int i;
1277
1278         debugf0(__FILE__ ": %s()\n", __func__);
1279         for (i = 0; i < i7core_dev->n_devs; i++) {
1280                 struct pci_dev *pdev = i7core_dev->pdev[i];
1281                 if (!pdev)
1282                         continue;
1283                 debugf0("Removing dev %02x:%02x.%d\n",
1284                         pdev->bus->number,
1285                         PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1286                 pci_dev_put(pdev);
1287         }
1288 }
1289
1290 static void i7core_put_all_devices(void)
1291 {
1292         struct i7core_dev *i7core_dev, *tmp;
1293
1294         list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
1295                 i7core_put_devices(i7core_dev);
1296                 free_i7core_dev(i7core_dev);
1297         }
1298 }
1299
1300 static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
1301 {
1302         struct pci_dev *pdev = NULL;
1303         int i;
1304
1305         /*
1306          * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core PCI
1307          * buses aren't announced by ACPI, so we need to do a legacy bus
1308          * scan to detect them.
1309          */
1310         while (table && table->descr) {
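                /*
                 * Probe for the first (MCR) device of each table; if it
                 * isn't visible, force a rescan of the hidden buses
                 * (255, 254, ...) where the non-core devices live.
                 */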
1311                 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
1312                 if (unlikely(!pdev)) {
1313                         for (i = 0; i < MAX_SOCKET_BUSES; i++)
1314                                 pcibios_scan_specific_bus(255-i);
1315                 }
1316                 pci_dev_put(pdev);
1317                 table++;
1318         }
1319 }
1320
1321 static unsigned i7core_pci_lastbus(void)
1322 {
1323         int last_bus = 0, bus;
1324         struct pci_bus *b = NULL;
1325
1326         while ((b = pci_find_next_bus(b)) != NULL) {
1327                 bus = b->number;
1328                 debugf0("Found bus %d\n", bus);
1329                 if (bus > last_bus)
1330                         last_bus = bus;
1331         }
1332
1333         debugf0("Last bus %d\n", last_bus);
1334
1335         return last_bus;
1336 }
1337
1338 /*
1339  *      i7core_get_onedevice    Find and perform 'get' operation on one
1340  *                      of the MCH's devices/functions we want to
1341  *                      reference for this driver
1342  *                      (device 0.0, plus devices 3.x through 6.x)
1343  */
1344 static int i7core_get_onedevice(struct pci_dev **prev,
1345                                 const struct pci_id_table *table,
1346                                 const unsigned devno,
1347                                 const unsigned last_bus)
1348 {
1349         struct i7core_dev *i7core_dev;
1350         const struct pci_id_descr *dev_descr = &table->descr[devno];
1351
1352         struct pci_dev *pdev = NULL;
1353         u8 bus = 0;
1354         u8 socket = 0;
1355
1356         pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1357                               dev_descr->dev_id, *prev);
1358
1359         /*
1360          * On Xeon 55xx, the Intel QuickPath Arch Generic Non-core regs
1361          * are at address 8086:2c40, instead of 8086:2c41. So, we need
1362          * to probe for the alternate address in case of failure
1363          */
1364         if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
1365                 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1366                                       PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
1367
1368         if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
1369                 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1370                                       PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
1371                                       *prev);
1372
1373         if (!pdev) {
1374                 if (*prev) {
1375                         *prev = pdev;
1376                         return 0;
1377                 }
1378
1379                 if (dev_descr->optional)
1380                         return 0;
1381
1382                 if (devno == 0)
1383                         return -ENODEV;
1384
1385                 i7core_printk(KERN_INFO,
1386                         "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
1387                         dev_descr->dev, dev_descr->func,
1388                         PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1389
1390                 /* End of list, leave */
1391                 return -ENODEV;
1392         }
1393         bus = pdev->bus->number;
1394
1395         socket = last_bus - bus;
1396
1397         i7core_dev = get_i7core_dev(socket);
1398         if (!i7core_dev) {
1399                 i7core_dev = alloc_i7core_dev(socket, table);
1400                 if (!i7core_dev) {
1401                         pci_dev_put(pdev);
1402                         return -ENOMEM;
1403                 }
1404         }
1405
1406         if (i7core_dev->pdev[devno]) {
1407                 i7core_printk(KERN_ERR,
1408                         "Duplicated device for "
1409                         "dev %02x:%02x.%d PCI ID %04x:%04x\n",
1410                         bus, dev_descr->dev, dev_descr->func,
1411                         PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1412                 pci_dev_put(pdev);
1413                 return -ENODEV;
1414         }
1415
1416         i7core_dev->pdev[devno] = pdev;
1417
1418         /* Sanity check */
1419         if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
1420                         PCI_FUNC(pdev->devfn) != dev_descr->func)) {
1421                 i7core_printk(KERN_ERR,
1422                         "Device PCI ID %04x:%04x "
1423                         "has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
1424                         PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
1425                         bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1426                         bus, dev_descr->dev, dev_descr->func);
1427                 return -ENODEV;
1428         }
1429
1430         /* Be sure that the device is enabled */
1431         if (unlikely(pci_enable_device(pdev) < 0)) {
1432                 i7core_printk(KERN_ERR,
1433                         "Couldn't enable "
1434                         "dev %02x:%02x.%d PCI ID %04x:%04x\n",
1435                         bus, dev_descr->dev, dev_descr->func,
1436                         PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1437                 return -ENODEV;
1438         }
1439
1440         debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
1441                 socket, bus, dev_descr->dev,
1442                 dev_descr->func,
1443                 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1444
1445         /*
1446          * As stated in drivers/pci/search.c, the reference count for
1447          * @from is always decremented if it is not %NULL. As we need to
1448          * keep every device until the walk ends in NULL, grab an extra reference.
1449          */
1450         pci_dev_get(pdev);
1451
1452         *prev = pdev;
1453
1454         return 0;
1455 }
1456
1457 static int i7core_get_all_devices(void)
1458 {
1459         int i, rc, last_bus;
1460         struct pci_dev *pdev = NULL;
1461         const struct pci_id_table *table = pci_dev_table;
1462
1463         last_bus = i7core_pci_lastbus();
1464
1465         while (table && table->descr) {
1466                 for (i = 0; i < table->n_devs; i++) {
1467                         pdev = NULL;
1468                         do {
1469                                 rc = i7core_get_onedevice(&pdev, table, i,
1470                                                           last_bus);
1471                                 if (rc < 0) {
1472                                         if (i == 0) {
1473                                                 i = table->n_devs;
1474                                                 break;
1475                                         }
1476                                         i7core_put_all_devices();
1477                                         return -ENODEV;
1478                                 }
1479                         } while (pdev);
1480                 }
1481                 table++;
1482         }
1483
1484         return 0;
1485 }
1486
1487 static int mci_bind_devs(struct mem_ctl_info *mci,
1488                          struct i7core_dev *i7core_dev)
1489 {
1490         struct i7core_pvt *pvt = mci->pvt_info;
1491         struct pci_dev *pdev;
1492         int i, func, slot;
1493         char *family;
1494
1495         pvt->is_registered = false;
1496         pvt->enable_scrub  = false;
1497         for (i = 0; i < i7core_dev->n_devs; i++) {
1498                 pdev = i7core_dev->pdev[i];
1499                 if (!pdev)
1500                         continue;
1501
1502                 func = PCI_FUNC(pdev->devfn);
1503                 slot = PCI_SLOT(pdev->devfn);
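                /*
                 * Per-socket device layout, as used below: slot 3 carries
                 * the MC control/status functions (0..MAX_MCR_FUNC),
                 * slots 4 .. 4 + NUM_CHANS - 1 the per-channel functions,
                 * and 0.0 is the non-core device.
                 */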
1504                 if (slot == 3) {
1505                         if (unlikely(func > MAX_MCR_FUNC))
1506                                 goto error;
1507                         pvt->pci_mcr[func] = pdev;
1508                 } else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
1509                         if (unlikely(func > MAX_CHAN_FUNC))
1510                                 goto error;
1511                         pvt->pci_ch[slot - 4][func] = pdev;
1512                 } else if (!slot && !func) {
1513                         pvt->pci_noncore = pdev;
1514
1515                         /* Detect the processor family */
1516                         switch (pdev->device) {
1517                         case PCI_DEVICE_ID_INTEL_I7_NONCORE:
1518                                 family = "Xeon 35xx/ i7core";
1519                                 pvt->enable_scrub = false;
1520                                 break;
1521                         case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT:
1522                                 family = "i7-800/i5-700";
1523                                 pvt->enable_scrub = false;
1524                                 break;
1525                         case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE:
1526                                 family = "Xeon 34xx";
1527                                 pvt->enable_scrub = false;
1528                                 break;
1529                         case PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT:
1530                                 family = "Xeon 55xx";
1531                                 pvt->enable_scrub = true;
1532                                 break;
1533                         case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2:
1534                                 family = "Xeon 56xx / i7-900";
1535                                 pvt->enable_scrub = true;
1536                                 break;
1537                         default:
1538                                 family = "unknown";
1539                                 pvt->enable_scrub = false;
1540                         }
1541                         debugf0("Detected processor type %s\n", family);
1542                 } else
1543                         goto error;
1544
1545                 debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
1546                         PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1547                         pdev, i7core_dev->socket);
1548
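                /*
                 * Device 3 function 2 is only present when registered
                 * DIMMs are used; its presence selects the RDIMM error
                 * counting path in i7core_check_error().
                 */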
1549                 if (PCI_SLOT(pdev->devfn) == 3 &&
1550                         PCI_FUNC(pdev->devfn) == 2)
1551                         pvt->is_registered = true;
1552         }
1553
1554         return 0;
1555
1556 error:
1557         i7core_printk(KERN_ERR, "Device %d, function %d "
1558                       "is out of the expected range\n",
1559                       slot, func);
1560         return -EINVAL;
1561 }
1562
1563 /****************************************************************************
1564                         Error check routines
1565  ****************************************************************************/
1566 static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
1567                                       const int chan,
1568                                       const int dimm,
1569                                       const int add)
1570 {
1571         char *msg;
1572         struct i7core_pvt *pvt = mci->pvt_info;
1573         int row = pvt->csrow_map[chan][dimm], i;
1574
1575         for (i = 0; i < add; i++) {
1576                 msg = kasprintf(GFP_KERNEL, "Corrected error "
1577                                 "(Socket=%d channel=%d dimm=%d)",
1578                                 pvt->i7core_dev->socket, chan, dimm);
1579
1580                 edac_mc_handle_fbd_ce(mci, row, 0, msg);
1581                 kfree(msg);
1582         }
1583 }
1584
1585 static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
1586                                          const int chan,
1587                                          const int new0,
1588                                          const int new1,
1589                                          const int new2)
1590 {
1591         struct i7core_pvt *pvt = mci->pvt_info;
1592         int add0 = 0, add1 = 0, add2 = 0;
1593         /* Update the CE counters, unless this is the first pass here */
1594         if (pvt->ce_count_available) {
1595                 /* Updates CE counters */
1596
1597                 add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
1598                 add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
1599                 add0 = new0 - pvt->rdimm_last_ce_count[chan][0];
1600
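                /*
                 * The hardware CE counters are 15 bits wide (see the
                 * 0x7fff masks above), so a negative delta means the
                 * counter wrapped: re-add the modulus to recover the
                 * real increment.
                 */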
1601                 if (add2 < 0)
1602                         add2 += 0x7fff;
1603                 pvt->rdimm_ce_count[chan][2] += add2;
1604
1605                 if (add1 < 0)
1606                         add1 += 0x7fff;
1607                 pvt->rdimm_ce_count[chan][1] += add1;
1608
1609                 if (add0 < 0)
1610                         add0 += 0x7fff;
1611                 pvt->rdimm_ce_count[chan][0] += add0;
1612         } else
1613                 pvt->ce_count_available = 1;
1614
1615         /* Store the new values */
1616         pvt->rdimm_last_ce_count[chan][2] = new2;
1617         pvt->rdimm_last_ce_count[chan][1] = new1;
1618         pvt->rdimm_last_ce_count[chan][0] = new0;
1619
1620         /* Update the EDAC core */
1621         if (add0 != 0)
1622                 i7core_rdimm_update_csrow(mci, chan, 0, add0);
1623         if (add1 != 0)
1624                 i7core_rdimm_update_csrow(mci, chan, 1, add1);
1625         if (add2 != 0)
1626                 i7core_rdimm_update_csrow(mci, chan, 2, add2);
1627
1628 }
1629
1630 static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
1631 {
1632         struct i7core_pvt *pvt = mci->pvt_info;
1633         u32 rcv[3][2];
1634         int i, new0, new1, new2;
1635
1636         /* Read the Dev 3 Fn 2 MC_COR_ECC_CNT registers directly */
1637         pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
1638                                                                 &rcv[0][0]);
1639         pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
1640                                                                 &rcv[0][1]);
1641         pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
1642                                                                 &rcv[1][0]);
1643         pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
1644                                                                 &rcv[1][1]);
1645         pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
1646                                                                 &rcv[2][0]);
1647         pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
1648                                                                 &rcv[2][1]);
1649         for (i = 0; i < 3; i++) {
1650                 debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
1651                         (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
1652                 /* if the channel has 3 DIMMs */
1653                 if (pvt->channel[i].dimms > 2) {
1654                         new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
1655                         new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
1656                         new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
1657                 } else {
1658                         new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
1659                                         DIMM_BOT_COR_ERR(rcv[i][0]);
1660                         new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
1661                                         DIMM_BOT_COR_ERR(rcv[i][1]);
1662                         new2 = 0;
1663                 }
1664
1665                 i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
1666         }
1667 }
1668
1669 /* This function is based on the device 3 function 4 registers as described on:
1670  * Intel Xeon Processor 5500 Series Datasheet Volume 2
1671  *      http://www.intel.com/Assets/PDF/datasheet/321322.pdf
1672  * also available at:
1673  *      http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
1674  */
1675 static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
1676 {
1677         struct i7core_pvt *pvt = mci->pvt_info;
1678         u32 rcv1, rcv0;
1679         int new0, new1, new2;
1680
1681         if (!pvt->pci_mcr[4]) {
1682                 debugf0("%s MCR registers not found\n", __func__);
1683                 return;
1684         }
1685
1686         /* Corrected test errors */
1687         pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
1688         pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);
1689
1690         /* Store the new values */
1691         new2 = DIMM2_COR_ERR(rcv1);
1692         new1 = DIMM1_COR_ERR(rcv0);
1693         new0 = DIMM0_COR_ERR(rcv0);
1694
1695         /* Update the CE counters, unless this is the first pass here */
1696         if (pvt->ce_count_available) {
1697                 /* Updates CE counters */
1698                 int add0, add1, add2;
1699
1700                 add2 = new2 - pvt->udimm_last_ce_count[2];
1701                 add1 = new1 - pvt->udimm_last_ce_count[1];
1702                 add0 = new0 - pvt->udimm_last_ce_count[0];
1703
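                /* 15-bit hardware counters: handle wraparound as in the RDIMM path */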
1704                 if (add2 < 0)
1705                         add2 += 0x7fff;
1706                 pvt->udimm_ce_count[2] += add2;
1707
1708                 if (add1 < 0)
1709                         add1 += 0x7fff;
1710                 pvt->udimm_ce_count[1] += add1;
1711
1712                 if (add0 < 0)
1713                         add0 += 0x7fff;
1714                 pvt->udimm_ce_count[0] += add0;
1715
1716                 if (add0 | add1 | add2)
1717                         i7core_printk(KERN_ERR, "New Corrected error(s): "
1718                                       "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
1719                                       add0, add1, add2);
1720         } else
1721                 pvt->ce_count_available = 1;
1722
1723         /* Store the new values */
1724         pvt->udimm_last_ce_count[2] = new2;
1725         pvt->udimm_last_ce_count[1] = new1;
1726         pvt->udimm_last_ce_count[0] = new0;
1727 }
1728
1729 /*
1730  * According to tables E-11 and E-12 in chapter E.3.3 of the Intel 64 and IA-32
1731  * Architectures Software Developer's Manual Volume 3B,
1732  * Nehalem is defined as family 0x06, model 0x1a.
1733  *
1734  * The MCA registers used here are the following ones:
1735  *     struct mce field MCA Register
1736  *     m->status        MSR_IA32_MC8_STATUS
1737  *     m->addr          MSR_IA32_MC8_ADDR
1738  *     m->misc          MSR_IA32_MC8_MISC
1739  * In the case of Nehalem, the error information is encoded in the .status
1740  * and .misc fields.
1741  */
1742 static void i7core_mce_output_error(struct mem_ctl_info *mci,
1743                                     const struct mce *m)
1744 {
1745         struct i7core_pvt *pvt = mci->pvt_info;
1746         char *type, *optype, *err, *msg;
1747         unsigned long error = m->status & 0x1ff0000L;
1748         u32 optypenum = (m->status >> 4) & 0x07;
1749         u32 core_err_cnt = (m->status >> 38) & 0x7fff;
1750         u32 dimm = (m->misc >> 16) & 0x3;
1751         u32 channel = (m->misc >> 18) & 0x3;
1752         u32 syndrome = m->misc >> 32;
1753         u32 errnum = find_first_bit(&error, 32);
1754         int csrow;
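        /*
         * Field layout assumed by the decoding above: status[24:16] holds
         * the model-specific error bits, status[52:38] the corrected error
         * count; misc[17:16] is the DIMM, misc[19:18] the channel and
         * misc[63:32] the syndrome.
         */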
1755
1756         if (m->mcgstatus & 1)
1757                 type = "FATAL";
1758         else
1759                 type = "NON_FATAL";
1760
1761         switch (optypenum) {
1762         case 0:
1763                 optype = "generic undef request";
1764                 break;
1765         case 1:
1766                 optype = "read error";
1767                 break;
1768         case 2:
1769                 optype = "write error";
1770                 break;
1771         case 3:
1772                 optype = "addr/cmd error";
1773                 break;
1774         case 4:
1775                 optype = "scrubbing error";
1776                 break;
1777         default:
1778                 optype = "reserved";
1779                 break;
1780         }
1781
1782         switch (errnum) {
1783         case 16:
1784                 err = "read ECC error";
1785                 break;
1786         case 17:
1787                 err = "RAS ECC error";
1788                 break;
1789         case 18:
1790                 err = "write parity error";
1791                 break;
1792         case 19:
1793                 err = "redundancy loss";
1794                 break;
1795         case 20:
1796                 err = "reserved";
1797                 break;
1798         case 21:
1799                 err = "memory range error";
1800                 break;
1801         case 22:
1802                 err = "RTID out of range";
1803                 break;
1804         case 23:
1805                 err = "address parity error";
1806                 break;
1807         case 24:
1808                 err = "byte enable parity error";
1809                 break;
1810         default:
1811                 err = "unknown";
1812         }
1813
1814         /* FIXME: should convert addr into bank and rank information */
1815         msg = kasprintf(GFP_ATOMIC,
1816                 "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
1817                 "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
1818                 type, (long long) m->addr, m->cpu, dimm, channel,
1819                 syndrome, core_err_cnt, (long long)m->status,
1820                 (long long)m->misc, optype, err);
1821
1822         debugf0("%s", msg);
1823
1824         csrow = pvt->csrow_map[channel][dimm];
1825
1826         /* Call the helper to output message */
1827         if (m->mcgstatus & 1)
1828                 edac_mc_handle_fbd_ue(mci, csrow, 0,
1829                                 0 /* FIXME: should be channel here */, msg);
1830         else if (!pvt->is_registered)
1831                 edac_mc_handle_fbd_ce(mci, csrow,
1832                                 0 /* FIXME: should be channel here */, msg);
1833
1834         kfree(msg);
1835 }
1836
1837 /*
1838  *      i7core_check_error      Retrieve and process errors reported by the
1839  *                              hardware. Called by the Core module.
1840  */
1841 static void i7core_check_error(struct mem_ctl_info *mci)
1842 {
1843         struct i7core_pvt *pvt = mci->pvt_info;
1844         int i;
1845         unsigned count = 0;
1846         struct mce *m;
1847
1848         /*
1849          * MCE first step: Copy all mce errors into a temporary buffer
1850          * We use a double buffering here, to reduce the risk of
1851          * losing an error.
1852          */
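        /*
         * mce_out is the producer index and mce_in the consumer index,
         * both wrapping modulo MCE_LOG_LEN. For example, with a ring of
         * 32 entries, mce_out = 2 and mce_in = 30, there are
         * (2 + 32 - 30) % 32 = 4 pending entries.
         */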
1853         smp_rmb();
1854         count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
1855                 % MCE_LOG_LEN;
1856         if (!count)
1857                 goto check_ce_error;
1858
1859         m = pvt->mce_outentry;
1860         if (pvt->mce_in + count > MCE_LOG_LEN) {
1861                 unsigned l = MCE_LOG_LEN - pvt->mce_in;
1862
1863                 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
1864                 smp_wmb();
1865                 pvt->mce_in = 0;
1866                 count -= l;
1867                 m += l;
1868         }
1869         memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
1870         smp_wmb();
1871         pvt->mce_in += count;
1872
1873         smp_rmb();
1874         if (pvt->mce_overrun) {
1875                 i7core_printk(KERN_ERR, "Lost %d memory errors\n",
1876                               pvt->mce_overrun);
1877                 smp_wmb();
1878                 pvt->mce_overrun = 0;
1879         }
1880
1881         /*
1882          * MCE second step: parse errors and display
1883          */
1884         for (i = 0; i < count; i++)
1885                 i7core_mce_output_error(mci, &pvt->mce_outentry[i]);
1886
1887         /*
1888          * Now, let's increment CE error counts
1889          */
1890 check_ce_error:
1891         if (!pvt->is_registered)
1892                 i7core_udimm_check_mc_ecc_err(mci);
1893         else
1894                 i7core_rdimm_check_mc_ecc_err(mci);
1895 }
1896
1897 /*
1898  * i7core_mce_check_error       Replicates mcelog routine to get errors
1899  *                              This routine simply queues mcelog errors, and
1900  *                              returns. The error itself should be handled later
1901  *                              by i7core_check_error.
1902  * WARNING: As this routine should be called at NMI time, extra care should
1903  * be taken to avoid deadlocks, and to be as fast as possible.
1904  */
1905 static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
1906                                   void *data)
1907 {
1908         struct mce *mce = (struct mce *)data;
1909         struct i7core_dev *i7_dev;
1910         struct mem_ctl_info *mci;
1911         struct i7core_pvt *pvt;
1912
1913         i7_dev = get_i7core_dev(mce->socketid);
1914         if (!i7_dev)
1915                 return NOTIFY_BAD;
1916
1917         mci = i7_dev->mci;
1918         pvt = mci->pvt_info;
1919
1920         /*
1921          * Just let mcelog handle it if the error is
1922          * outside the memory controller
1923          */
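        /*
         * Per the Intel SDM compound error code tables, memory controller
         * errors use an MCA error code of the form 0000 0000 1MMM CCCC,
         * i.e. bits 15:7 of the status must be exactly 1.
         */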
1924         if (((mce->status & 0xffff) >> 7) != 1)
1925                 return NOTIFY_DONE;
1926
1927         /* Bank 8 registers are the only ones that we know how to handle */
1928         if (mce->bank != 8)
1929                 return NOTIFY_DONE;
1930
1931 #ifdef CONFIG_SMP
1932         /* Only handle it if it comes from the right memory controller */
1933         if (mce->socketid != pvt->i7core_dev->socket)
1934                 return NOTIFY_DONE;
1935 #endif
1936
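        /*
         * If advancing mce_out would catch up with mce_in, the ring is
         * full: drop the event and account for it in mce_overrun.
         */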
1937         smp_rmb();
1938         if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
1939                 smp_wmb();
1940                 pvt->mce_overrun++;
1941                 return NOTIFY_DONE;
1942         }
1943
1944         /* Copy the memory error into the ring buffer */
1945         memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
1946         smp_wmb();
1947         pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
1948
1949         /* Handle fatal errors immediately */
1950         if (mce->mcgstatus & 1)
1951                 i7core_check_error(mci);
1952
1953         /* Advise mcelog that the errors were handled */
1954         return NOTIFY_STOP;
1955 }
1956
1957 static struct notifier_block i7_mce_dec = {
1958         .notifier_call  = i7core_mce_check_error,
1959 };
1960
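/*
 * Layout of an SMBIOS type 17 (Memory Device) entry, used below to read
 * the DIMM clock speed from the DMI tables.
 */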
1961 struct memdev_dmi_entry {
1962         u8 type;
1963         u8 length;
1964         u16 handle;
1965         u16 phys_mem_array_handle;
1966         u16 mem_err_info_handle;
1967         u16 total_width;
1968         u16 data_width;
1969         u16 size;
1970         u8 form;
1971         u8 device_set;
1972         u8 device_locator;
1973         u8 bank_locator;
1974         u8 memory_type;
1975         u16 type_detail;
1976         u16 speed;
1977         u8 manufacturer;
1978         u8 serial_number;
1979         u8 asset_tag;
1980         u8 part_number;
1981         u8 attributes;
1982         u32 extended_size;
1983         u16 conf_mem_clk_speed;
1984 } __attribute__((__packed__));
1985
1986
1987 /*
1988  * Decode the DRAM clock frequency. Be paranoid: make sure that all
1989  * memory devices report the same speed, and if they don't, consider
1990  * all speeds to be invalid.
1991  */
1992 static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq)
1993 {
1994         int *dclk_freq = _dclk_freq;
1995         u16 dmi_mem_clk_speed;
1996
1997         if (*dclk_freq == -1)
1998                 return;
1999
2000         if (dh->type == DMI_ENTRY_MEM_DEVICE) {
2001                 struct memdev_dmi_entry *memdev_dmi_entry =
2002                         (struct memdev_dmi_entry *)dh;
2003                 unsigned long conf_mem_clk_speed_offset =
2004                         (unsigned long)&memdev_dmi_entry->conf_mem_clk_speed -
2005                         (unsigned long)&memdev_dmi_entry->type;
2006                 unsigned long speed_offset =
2007                         (unsigned long)&memdev_dmi_entry->speed -
2008                         (unsigned long)&memdev_dmi_entry->type;
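                /*
                 * SMBIOS entries may be truncated: a field is only present
                 * if the entry's length extends past that field's offset,
                 * which is what the length checks below test.
                 */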
2009
2010                 /* Check that a DIMM is present */
2011                 if (memdev_dmi_entry->size == 0)
2012                         return;
2013
2014                 /*
2015                  * Pick the configured speed if it's available; otherwise
2016                  * pick the DIMM speed; failing that, we have no speed.
2017                  */
2018                 if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) {
2019                         dmi_mem_clk_speed =
2020                                 memdev_dmi_entry->conf_mem_clk_speed;
2021                 } else if (memdev_dmi_entry->length > speed_offset) {
2022                         dmi_mem_clk_speed = memdev_dmi_entry->speed;
2023                 } else {
2024                         *dclk_freq = -1;
2025                         return;
2026                 }
2027
2028                 if (*dclk_freq == 0) {
2029                         /* First pass, speed was 0 */
2030                         if (dmi_mem_clk_speed > 0) {
2031                                 /* Set speed if a valid speed is read */
2032                                 *dclk_freq = dmi_mem_clk_speed;
2033                         } else {
2034                                 /* Otherwise we don't have a valid speed */
2035                                 *dclk_freq = -1;
2036                         }
2037                 } else if (*dclk_freq > 0 &&
2038                            *dclk_freq != dmi_mem_clk_speed) {
2039                         /*
2040                          * If we have a speed, check that all DIMMS are the same
2041                          * speed, otherwise set the speed as invalid.
2042                          */
2043                         *dclk_freq = -1;
2044                 }
2045         }
2046 }
2047
2048 /*
2049  * The default DCLK frequency is used as a fallback if we
2050  * fail to find anything reliable in the DMI. The value
2051  * is taken straight from the datasheet.
2052  */
2053 #define DEFAULT_DCLK_FREQ 800
2054
2055 static int get_dclk_freq(void)
2056 {
2057         int dclk_freq = 0;
2058
2059         dmi_walk(decode_dclk, (void *)&dclk_freq);
2060
2061         if (dclk_freq < 1)
2062                 return DEFAULT_DCLK_FREQ;
2063
2064         return dclk_freq;
2065 }
2066
2067 /*
2068  * set_sdram_scrub_rate         This routine sets the byte/sec bandwidth scrub
2069  *                              rate in hardware according to the SCRUBINTERVAL
2070  *                              formula found in the datasheet.
2071  */
2072 static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
2073 {
2074         struct i7core_pvt *pvt = mci->pvt_info;
2075         struct pci_dev *pdev;
2076         u32 dw_scrub;
2077         u32 dw_ssr;
2078
2079         /* Get data from the MC register, function 2 */
2080         pdev = pvt->pci_mcr[2];
2081         if (!pdev)
2082                 return -ENODEV;
2083
2084         pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub);
2085
2086         if (new_bw == 0) {
2087                 /* Prepare to disable patrol scrub */
2088                 dw_scrub &= ~STARTSCRUB;
2089                 /* Stop the patrol scrub engine */
2090                 write_and_test(pdev, MC_SCRUB_CONTROL,
2091                                dw_scrub & ~SCRUBINTERVAL_MASK);
2092
2093                 /* Get current status of scrub rate and set bit to disable */
2094                 pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
2095                 dw_ssr &= ~SSR_MODE_MASK;
2096                 dw_ssr |= SSR_MODE_DISABLE;
2097         } else {
2098                 const int cache_line_size = 64;
2099                 const u32 freq_dclk_mhz = pvt->dclk_freq;
2100                 unsigned long long scrub_interval;
2101                 /*
2102                  * Translate the desired scrub rate to a register value and
2103                  * program the corresponding register value.
2104                  */
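                /*
                 * scrub_interval is the number of DCLK cycles between two
                 * scrubbed cache lines. As a hypothetical example, with
                 * dclk_freq = 800 MHz and new_bw = 5120000 bytes/sec:
                 * 800 * 64 * 1000000 / 5120000 = 10000 cycles per line.
                 */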
2105                 scrub_interval = (unsigned long long)freq_dclk_mhz *
2106                         cache_line_size * 1000000;
2107                 do_div(scrub_interval, new_bw);
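                /*
                 * do_div() is used instead of a plain '/' so that the
                 * 64-bit division also builds on 32-bit architectures,
                 * where the compiler would otherwise emit a call to a
                 * libgcc helper the kernel does not provide.
                 */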
2108
2109                 if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK)
2110                         return -EINVAL;
2111
2112                 dw_scrub = SCRUBINTERVAL_MASK & scrub_interval;
2113
2114                 /* Start the patrol scrub engine */
2115                 pci_write_config_dword(pdev, MC_SCRUB_CONTROL,
2116                                        STARTSCRUB | dw_scrub);
2117
2118                 /* Get current status of scrub rate and set bit to enable */
2119                 pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
2120                 dw_ssr &= ~SSR_MODE_MASK;
2121                 dw_ssr |= SSR_MODE_ENABLE;
2122         }
2123         /* Disable or enable scrubbing */
2124         pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr);
2125
2126         return new_bw;
2127 }
2128
2129 /*
2130  * get_sdram_scrub_rate         This routine converts the current scrub rate
2131  *                              value into byte/sec bandwidth according to the
2132  *                              SCRUBINTERVAL formula found in the datasheet.
2133  */
2134 static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
2135 {
2136         struct i7core_pvt *pvt = mci->pvt_info;
2137         struct pci_dev *pdev;
2138         const u32 cache_line_size = 64;
2139         const u32 freq_dclk_mhz = pvt->dclk_freq;
2140         unsigned long long scrub_rate;
2141         u32 scrubval;
2142
2143         /* Get data from the MC register, function 2 */
2144         pdev = pvt->pci_mcr[2];
2145         if (!pdev)
2146                 return -ENODEV;
2147
2148         /* Get current scrub control data */
2149         pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval);
2150
2151         /* Mask the highest 8 bits to 0, keeping the 24-bit scrub interval */
2152         scrubval &= SCRUBINTERVAL_MASK;
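        /* A scrub interval of zero means the patrol scrubber is disabled */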
2153         if (!scrubval)
2154                 return 0;
2155
2156         /* Convert the scrub interval into byte/sec bandwidth */
2157         scrub_rate = (unsigned long long)freq_dclk_mhz *
2158                 1000000 * cache_line_size;
2159         do_div(scrub_rate, scrubval);
2160         return (int)scrub_rate;
2161 }
2162
2163 static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)
2164 {
2165         struct i7core_pvt *pvt = mci->pvt_info;
2166         u32 pci_lock;
2167
2168         /* Unlock writes to pci registers */
2169         pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
2170         pci_lock &= ~0x3;
2171         pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
2172                                pci_lock | MC_CFG_UNLOCK);
2173
2174         mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
2175         mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
2176 }
2177
2178 static void disable_sdram_scrub_setting(struct mem_ctl_info *mci)
2179 {
2180         struct i7core_pvt *pvt = mci->pvt_info;
2181         u32 pci_lock;
2182
2183         /* Lock writes to pci registers */
2184         pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
2185         pci_lock &= ~0x3;
2186         pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
2187                                pci_lock | MC_CFG_LOCK);
2188 }
2189
2190 static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
2191 {
2192         pvt->i7core_pci = edac_pci_create_generic_ctl(
2193                                                 &pvt->i7core_dev->pdev[0]->dev,
2194                                                 EDAC_MOD_STR);
2195         if (unlikely(!pvt->i7core_pci))
2196                 i7core_printk(KERN_WARNING,
2197                               "Unable to setup PCI error report via EDAC\n");
2198 }
2199
2200 static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
2201 {
2202         if (likely(pvt->i7core_pci))
2203                 edac_pci_release_generic_ctl(pvt->i7core_pci);
2204         else
2205                 i7core_printk(KERN_ERR,
2206                                 "Couldn't find mem_ctl_info for socket %d\n",
2207                                 pvt->i7core_dev->socket);
2208         pvt->i7core_pci = NULL;
2209 }
2210
2211 static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
2212 {
2213         struct mem_ctl_info *mci = i7core_dev->mci;
2214         struct i7core_pvt *pvt;
2215
2216         if (unlikely(!mci || !mci->pvt_info)) {
2217                 debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
2218                         __func__, &i7core_dev->pdev[0]->dev);
2219
2220                 i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
2221                 return;
2222         }
2223
2224         pvt = mci->pvt_info;
2225
2226         debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
2227                 __func__, mci, &i7core_dev->pdev[0]->dev);
2228
2229         /* Disable scrubrate setting */
2230         if (pvt->enable_scrub)
2231                 disable_sdram_scrub_setting(mci);
2232
2233         atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &i7_mce_dec);
2234
2235         /* Disable EDAC polling */
2236         i7core_pci_ctl_release(pvt);
2237
2238         /* Remove MC sysfs nodes */
2239         edac_mc_del_mc(mci->dev);
2240
2241         debugf1("%s: free mci struct\n", mci->ctl_name);
2242         kfree(mci->ctl_name);
2243         edac_mc_free(mci);
2244         i7core_dev->mci = NULL;
2245 }
2246
2247 static int i7core_register_mci(struct i7core_dev *i7core_dev)
2248 {
2249         struct mem_ctl_info *mci;
2250         struct i7core_pvt *pvt;
2251         int rc, channels, csrows;
2252
2253         /* Check the number of active and not disabled channels */
2254         rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
2255         if (unlikely(rc < 0))
2256                 return rc;
2257
2258         /* allocate a new MC control structure */
2259         mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
2260         if (unlikely(!mci))
2261                 return -ENOMEM;
2262
2263         debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
2264                 __func__, mci, &i7core_dev->pdev[0]->dev);
2265
2266         pvt = mci->pvt_info;
2267         memset(pvt, 0, sizeof(*pvt));
2268
2269         /* Associates i7core_dev and mci for future usage */
2270         pvt->i7core_dev = i7core_dev;
2271         i7core_dev->mci = mci;
2272
2273         /*
2274          * FIXME: how to handle RDDR3 at MCI level? It is possible to have
2275          * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
2276          * memory channels
2277          */
2278         mci->mtype_cap = MEM_FLAG_DDR3;
2279         mci->edac_ctl_cap = EDAC_FLAG_NONE;
2280         mci->edac_cap = EDAC_FLAG_NONE;
2281         mci->mod_name = "i7core_edac.c";
2282         mci->mod_ver = I7CORE_REVISION;
2283         mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
2284                                   i7core_dev->socket);
2285         mci->dev_name = pci_name(i7core_dev->pdev[0]);
2286         mci->ctl_page_to_phys = NULL;
2287
2288         /* Store pci devices at mci for faster access */
2289         rc = mci_bind_devs(mci, i7core_dev);
2290         if (unlikely(rc < 0))
2291                 goto fail0;
2292
2293         if (pvt->is_registered)
2294                 mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
2295         else
2296                 mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;
2297
2298         /* Get dimm basic config */
2299         get_dimm_config(mci);
2300         /* record ptr to the generic device */
2301         mci->dev = &i7core_dev->pdev[0]->dev;
2302         /* Set the function pointer to an actual operation function */
2303         mci->edac_check = i7core_check_error;
2304
2305         /* Enable scrubrate setting */
2306         if (pvt->enable_scrub)
2307                 enable_sdram_scrub_setting(mci);
2308
2309         /* add this new MC control structure to EDAC's list of MCs */
2310         if (unlikely(edac_mc_add_mc(mci))) {
2311                 debugf0("MC: " __FILE__
2312                         ": %s(): failed edac_mc_add_mc()\n", __func__);
2313                 /* FIXME: perhaps some code should go here that disables error
2314                  * reporting if we just enabled it
2315                  */
2316
2317                 rc = -EINVAL;
2318                 goto fail0;
2319         }
2320
2321         /* Default error mask is any memory */
2322         pvt->inject.channel = 0;
2323         pvt->inject.dimm = -1;
2324         pvt->inject.rank = -1;
2325         pvt->inject.bank = -1;
2326         pvt->inject.page = -1;
2327         pvt->inject.col = -1;
2328
2329         /* allocating generic PCI control info */
2330         i7core_pci_ctl_create(pvt);
2331
2332         /* DCLK for scrub rate setting */
2333         pvt->dclk_freq = get_dclk_freq();
2334
2335         atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);
2336
2337         return 0;
2338
2339 fail0:
2340         kfree(mci->ctl_name);
2341         edac_mc_free(mci);
2342         i7core_dev->mci = NULL;
2343         return rc;
2344 }
2345
2346 /*
2347  *      i7core_probe    Probe for ONE instance of device to see if it is
2348  *                      present.
2349  *      return:
2350  *              0 if a device was found
2351  *              < 0 on error
2352  */
2353
2354 static int __devinit i7core_probe(struct pci_dev *pdev,
2355                                   const struct pci_device_id *id)
2356 {
2357         int rc, count = 0;
2358         struct i7core_dev *i7core_dev;
2359
2360         /* get the pci devices we want to reserve for our use */
2361         mutex_lock(&i7core_edac_lock);
2362
2363         /*
2364          * All memory controllers are allocated at the first pass.
2365          */
2366         if (unlikely(probed >= 1)) {
2367                 mutex_unlock(&i7core_edac_lock);
2368                 return -ENODEV;
2369         }
2370         probed++;
2371
2372         rc = i7core_get_all_devices();
2373         if (unlikely(rc < 0))
2374                 goto fail0;
2375
2376         list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
2377                 count++;
2378                 rc = i7core_register_mci(i7core_dev);
2379                 if (unlikely(rc < 0))
2380                         goto fail1;
2381         }
2382
2383         /*
2384          * Nehalem-EX uses a different memory controller. However, as the
2385          * memory controller is not visible on some Nehalem/Nehalem-EP, we
2386          * need to indirectly probe via an X58 PCI device. The same devices
2387          * are found on (some) Nehalem-EX. So, on those machines, the
2388          * probe routine needs to return -ENODEV, as the actual Memory
2389          * Controller registers won't be detected.
2390          */
2391         if (!count) {
2392                 rc = -ENODEV;
2393                 goto fail1;
2394         }
2395
2396         i7core_printk(KERN_INFO,
2397                       "Driver loaded, %d memory controller(s) found.\n",
2398                       count);
2399
2400         mutex_unlock(&i7core_edac_lock);
2401         return 0;
2402
2403 fail1:
2404         list_for_each_entry(i7core_dev, &i7core_edac_list, list)
2405                 i7core_unregister_mci(i7core_dev);
2406
2407         i7core_put_all_devices();
2408 fail0:
2409         mutex_unlock(&i7core_edac_lock);
2410         return rc;
2411 }
2412
2413 /*
2414  *      i7core_remove   destructor for one instance of device
2415  *
2416  */
2417 static void __devexit i7core_remove(struct pci_dev *pdev)
2418 {
2419         struct i7core_dev *i7core_dev;
2420
2421         debugf0(__FILE__ ": %s()\n", __func__);
2422
2423         /*
2424          * We have a problem here: the pdev value at removal time will be
2425          * wrong, since it will point to the X58 register used to detect that
2426          * the machine is a Nehalem or later design. However, due to the way
2427          * several PCI devices are grouped together to provide MC
2428          * functionality, we need to use a different method for releasing them.
2429          */
2430
2431         mutex_lock(&i7core_edac_lock);
2432
2433         if (unlikely(!probed)) {
2434                 mutex_unlock(&i7core_edac_lock);
2435                 return;
2436         }
2437
2438         list_for_each_entry(i7core_dev, &i7core_edac_list, list)
2439                 i7core_unregister_mci(i7core_dev);
2440
2441         /* Release PCI resources */
2442         i7core_put_all_devices();
2443
2444         probed--;
2445
2446         mutex_unlock(&i7core_edac_lock);
2447 }
2448
2449 MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);
2450
2451 /*
2452  *      i7core_driver   pci_driver structure for this module
2453  *
2454  */
2455 static struct pci_driver i7core_driver = {
2456         .name     = "i7core_edac",
2457         .probe    = i7core_probe,
2458         .remove   = __devexit_p(i7core_remove),
2459         .id_table = i7core_pci_tbl,
2460 };
2461
2462 /*
2463  *      i7core_init             Module entry function
2464  *                      Try to initialize this module for its devices
2465  */
2466 static int __init i7core_init(void)
2467 {
2468         int pci_rc;
2469
2470         debugf2("MC: " __FILE__ ": %s()\n", __func__);
2471
2472         /* Ensure that the OPSTATE is set correctly for POLL or NMI */
2473         opstate_init();
2474
2475         if (use_pci_fixup)
2476                 i7core_xeon_pci_fixup(pci_dev_table);
2477
2478         pci_rc = pci_register_driver(&i7core_driver);
2479
2480         if (pci_rc >= 0)
2481                 return 0;
2482
2483         i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
2484                       pci_rc);
2485
2486         return pci_rc;
2487 }
2488
2489 /*
2490  *      i7core_exit()   Module exit function
2491  *                      Unregister the driver
2492  */
2493 static void __exit i7core_exit(void)
2494 {
2495         debugf2("MC: " __FILE__ ": %s()\n", __func__);
2496         pci_unregister_driver(&i7core_driver);
2497 }
2498
2499 module_init(i7core_init);
2500 module_exit(i7core_exit);
2501
2502 MODULE_LICENSE("GPL");
2503 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
2504 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
2505 MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
2506                    I7CORE_REVISION);
2507
2508 module_param(edac_op_state, int, 0444);
2509 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");