/*
 * i7core: temporary workaround to allow it to compile against 2.6.30
 * drivers/edac/i7core_edac.c (from deliverable/linux.git)
 */
1 /* Intel 7 core Memory Controller kernel module (Nehalem)
2 *
3 * This file may be distributed under the terms of the
4 * GNU General Public License version 2 only.
5 *
6 * Copyright (c) 2009 by:
7 * Mauro Carvalho Chehab <mchehab@redhat.com>
8 *
9 * Red Hat Inc. http://www.redhat.com
10 *
11 * Forked and adapted from the i5400_edac driver
12 *
13 * Based on the following public Intel datasheets:
14 * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
15 * Datasheet, Volume 2:
16 * http://download.intel.com/design/processor/datashts/320835.pdf
17 * Intel Xeon Processor 5500 Series Datasheet Volume 2
18 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
19 * also available at:
20 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
21 */
22
23 #include <linux/module.h>
24 #include <linux/init.h>
25 #include <linux/pci.h>
26 #include <linux/pci_ids.h>
27 #include <linux/slab.h>
28 #include <linux/edac.h>
29 #include <linux/mmzone.h>
30 #include <linux/edac_mce.h>
31 #include <linux/spinlock.h>
32 #include <asm/processor.h>
33
34 #include "edac_core.h"
35
/*
 * Alter this version for the module when modifications are made
 */
#define I7CORE_REVISION    " Ver: 1.0.0 " __DATE__
#define EDAC_MOD_STR      "i7core_edac"

/*
 * Debug macros
 */
#define i7core_printk(level, fmt, arg...)			\
	edac_printk(level, "i7core", fmt, ##arg)

#define i7core_mc_printk(mci, level, fmt, arg...)		\
	edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)

/*
 * i7core Memory Controller Registers
 */

	/* OFFSETS for Device 0 Function 0 */

#define MC_CFG_CONTROL	0x90

	/* OFFSETS for Device 3 Function 0 */

#define MC_CONTROL	0x48
#define MC_STATUS	0x4c
#define MC_MAX_DOD	0x64

/*
 * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet:
 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */

#define MC_TEST_ERR_RCV1	0x60
  #define DIMM2_COR_ERR(r)			((r) & 0x7fff)

#define MC_TEST_ERR_RCV0	0x64
  #define DIMM1_COR_ERR(r)			(((r) >> 16) & 0x7fff)
  #define DIMM0_COR_ERR(r)			((r) & 0x7fff)

/* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
#define MC_COR_ECC_CNT_0	0x80
#define MC_COR_ECC_CNT_1	0x84
#define MC_COR_ECC_CNT_2	0x88
#define MC_COR_ECC_CNT_3	0x8c
#define MC_COR_ECC_CNT_4	0x90
#define MC_COR_ECC_CNT_5	0x94

/* Each MC_COR_ECC_CNT register packs two 15-bit DIMM counters */
#define DIMM_TOP_COR_ERR(r)			(((r) >> 16) & 0x7fff)
#define DIMM_BOT_COR_ERR(r)			((r) & 0x7fff)


	/* OFFSETS for Devices 4,5 and 6 Function 0 */

#define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
  #define THREE_DIMMS_PRESENT		(1 << 24)
  #define SINGLE_QUAD_RANK_PRESENT	(1 << 23)
  #define QUAD_RANK_PRESENT		(1 << 22)
  #define REGISTERED_DIMM		(1 << 15)

#define MC_CHANNEL_MAPPER	0x60
  /* Logical-channel decode: each channel has a 6-bit field; 3 bits each
     for read and write mapping, stored off-by-one */
  #define RDLCH(r, ch)		((((r) >> (3 + (ch * 6))) & 0x07) - 1)
  #define WRLCH(r, ch)		((((r) >> (ch * 6)) & 0x07) - 1)

#define MC_CHANNEL_RANK_PRESENT 0x7c
  #define RANK_PRESENT_MASK		0xffff

#define MC_CHANNEL_ADDR_MATCH	0xf0
#define MC_CHANNEL_ERROR_MASK	0xf8
#define MC_CHANNEL_ERROR_INJECT	0xfc
  #define INJECT_ADDR_PARITY	0x10
  #define INJECT_ECC		0x08
  #define MASK_CACHELINE	0x06
  #define MASK_FULL_CACHELINE	0x06
  #define MASK_MSB32_CACHELINE	0x04
  #define MASK_LSB32_CACHELINE	0x02
  #define NO_MASK_CACHELINE	0x00
  #define REPEAT_EN		0x01

	/* OFFSETS for Devices 4,5 and 6 Function 1 */

#define MC_DOD_CH_DIMM0		0x48
#define MC_DOD_CH_DIMM1		0x4c
#define MC_DOD_CH_DIMM2		0x50
  #define RANKOFFSET_MASK	((1 << 12) | (1 << 11) | (1 << 10))
  #define RANKOFFSET(x)		((x & RANKOFFSET_MASK) >> 10)
  #define DIMM_PRESENT_MASK	(1 << 9)
  #define DIMM_PRESENT(x)	(((x) & DIMM_PRESENT_MASK) >> 9)
  #define MC_DOD_NUMBANK_MASK		((1 << 8) | (1 << 7))
  #define MC_DOD_NUMBANK(x)		(((x) & MC_DOD_NUMBANK_MASK) >> 7)
  #define MC_DOD_NUMRANK_MASK		((1 << 6) | (1 << 5))
  #define MC_DOD_NUMRANK(x)		(((x) & MC_DOD_NUMRANK_MASK) >> 5)
  #define MC_DOD_NUMROW_MASK		((1 << 4) | (1 << 3) | (1 << 2))
  #define MC_DOD_NUMROW(x)		(((x) & MC_DOD_NUMROW_MASK) >> 2)
  #define MC_DOD_NUMCOL_MASK		3
  #define MC_DOD_NUMCOL(x)		((x) & MC_DOD_NUMCOL_MASK)

#define MC_RANK_PRESENT		0x7c

#define MC_SAG_CH_0	0x80
#define MC_SAG_CH_1	0x84
#define MC_SAG_CH_2	0x88
#define MC_SAG_CH_3	0x8c
#define MC_SAG_CH_4	0x90
#define MC_SAG_CH_5	0x94
#define MC_SAG_CH_6	0x98
#define MC_SAG_CH_7	0x9c

#define MC_RIR_LIMIT_CH_0	0x40
#define MC_RIR_LIMIT_CH_1	0x44
#define MC_RIR_LIMIT_CH_2	0x48
#define MC_RIR_LIMIT_CH_3	0x4C
#define MC_RIR_LIMIT_CH_4	0x50
#define MC_RIR_LIMIT_CH_5	0x54
#define MC_RIR_LIMIT_CH_6	0x58
#define MC_RIR_LIMIT_CH_7	0x5C
#define MC_RIR_LIMIT_MASK	((1 << 10) - 1)

#define MC_RIR_WAY_CH		0x80
  #define MC_RIR_WAY_OFFSET_MASK	(((1 << 14) - 1) & ~0x7)
  #define MC_RIR_WAY_RANK_MASK		0x7
158
/*
 * i7core structs
 */

#define NUM_CHANS 3		/* Max memory channels per socket */
#define MAX_DIMMS 3		/* Max DIMMS per channel */
#define NUM_SOCKETS 2		/* Max number of MC sockets */
#define MAX_MCR_FUNC 4		/* Highest MCR (device 3) PCI function cached */
#define MAX_CHAN_FUNC 3		/* Highest channel-device PCI function cached */

/* Snapshot of the device 3 fn 0 registers, filled by get_dimm_config() */
struct i7core_info {
	u32	mc_control;	/* MC_CONTROL */
	u32	mc_status;	/* MC_STATUS */
	u32	max_dod;	/* MC_MAX_DOD */
	u32	ch_map;		/* MC_CHANNEL_MAPPER */
};


/* Error-injection parameters, set via the inject_* sysfs nodes */
struct i7core_inject {
	int	enable;		/* non-zero once injection is armed */

	u8	socket;
	u32	section;
	u32	type;
	u32	eccmask;

	/* Error address mask: -1 on any field means "match any" */
	int channel, dimm, rank, bank, page, col;
};

struct i7core_channel {
	u32	ranks;		/* ranks per DIMM on this channel (2 or 4) */
	u32	dimms;		/* number of DIMMs detected */
};

/* One entry per PCI function we reserve, with per-socket device cache */
struct pci_id_descr {
	int		dev;
	int		func;
	int		dev_id;
	struct pci_dev	*pdev[NUM_SOCKETS];
};

struct i7core_pvt {
	struct pci_dev	*pci_noncore[NUM_SOCKETS];
	struct pci_dev	*pci_mcr[NUM_SOCKETS][MAX_MCR_FUNC + 1];
	struct pci_dev	*pci_ch[NUM_SOCKETS][NUM_CHANS][MAX_CHAN_FUNC + 1];

	struct i7core_info	info;
	struct i7core_inject	inject;
	struct i7core_channel	channel[NUM_SOCKETS][NUM_CHANS];

	int		sockets; /* Number of sockets */
	int		channels; /* Number of active channels */

	int		ce_count_available[NUM_SOCKETS];
	/* Maps (socket, channel, dimm) to the fake csrow index */
	int		csrow_map[NUM_SOCKETS][NUM_CHANS][MAX_DIMMS];

	/* ECC corrected errors counts per udimm */
	unsigned long	udimm_ce_count[NUM_SOCKETS][MAX_DIMMS];
	int		udimm_last_ce_count[NUM_SOCKETS][MAX_DIMMS];
	/* ECC corrected errors counts per rdimm */
	unsigned long	rdimm_ce_count[NUM_SOCKETS][NUM_CHANS][MAX_DIMMS];
	int		rdimm_last_ce_count[NUM_SOCKETS][NUM_CHANS][MAX_DIMMS];

	unsigned int	is_registered[NUM_SOCKETS]; /* non-zero when RDIMMs */

	/* mcelog glue */
	struct edac_mce		edac_mce;
	struct mce		mce_entry[MCE_LOG_LEN];
	unsigned		mce_count;
	spinlock_t		mce_lock;
};

/* Device name and register DID (Device ID) */
struct i7core_dev_info {
	const char *ctl_name;	/* name for this device */
	u16 fsb_mapping_errors;	/* DID for the branchmap,control */
};
237
#define PCI_DESCR(device, function, device_id)	\
	.dev = (device),			\
	.func = (function),			\
	.dev_id = (device_id)

/*
 * All PCI functions this driver reserves per socket.
 * NOTE(review): this table is file-local but not declared static —
 * consider making it static to avoid polluting the global namespace.
 */
struct pci_id_descr pci_devs[] = {
		/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS) },	/* if RDIMM */
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },

		/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) },

		/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) },

		/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },

		/* Generic Non-core registers */
	/*
	 * This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
	 * On Xeon 55xx, however, it has a different id (8086:2c40). So,
	 * the probing code needs to test for the other address in case of
	 * failure of this one
	 */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NOCORE) },

};
#define N_DEVS ARRAY_SIZE(pci_devs)

/*
 * pci_device_id table for which devices we are looking for
 * This should match the first device at pci_devs table
 */
static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
	{0,}			/* 0 terminated list. */
};


/* Table of devices attributes supported by this driver */
static const struct i7core_dev_info i7core_devs[] = {
	{
		.ctl_name = "i7 Core",
		.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7_MCR,
	},
};

static struct edac_pci_ctl_info *i7core_pci;

/****************************************************************************
			Ancillary status routines
 ****************************************************************************/

	/* MC_CONTROL bits */
#define CH_ACTIVE(pvt, ch)	((pvt)->info.mc_control & (1 << (8 + ch)))
#define ECCx8(pvt)		((pvt)->info.mc_control & (1 << 1))

	/* MC_STATUS bits */
#define ECC_ENABLED(pvt)	((pvt)->info.mc_status & (1 << 4))
#define CH_DISABLED(pvt, ch)	((pvt)->info.mc_status & (1 << ch))

	/* MC_MAX_DOD read functions */
313 static inline int numdimms(u32 dimms)
314 {
315 return (dimms & 0x3) + 1;
316 }
317
318 static inline int numrank(u32 rank)
319 {
320 static int ranks[4] = { 1, 2, 4, -EINVAL };
321
322 return ranks[rank & 0x3];
323 }
324
325 static inline int numbank(u32 bank)
326 {
327 static int banks[4] = { 4, 8, 16, -EINVAL };
328
329 return banks[bank & 0x3];
330 }
331
332 static inline int numrow(u32 row)
333 {
334 static int rows[8] = {
335 1 << 12, 1 << 13, 1 << 14, 1 << 15,
336 1 << 16, -EINVAL, -EINVAL, -EINVAL,
337 };
338
339 return rows[row & 0x7];
340 }
341
342 static inline int numcol(u32 col)
343 {
344 static int cols[8] = {
345 1 << 10, 1 << 11, 1 << 12, -EINVAL,
346 };
347 return cols[col & 0x3];
348 }
349
350 /****************************************************************************
351 Memory check routines
352 ****************************************************************************/
353 static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
354 unsigned func)
355 {
356 int i;
357
358 for (i = 0; i < N_DEVS; i++) {
359 if (!pci_devs[i].pdev[socket])
360 continue;
361
362 if (PCI_SLOT(pci_devs[i].pdev[socket]->devfn) == slot &&
363 PCI_FUNC(pci_devs[i].pdev[socket]->devfn) == func) {
364 return pci_devs[i].pdev[socket];
365 }
366 }
367
368 return NULL;
369 }
370
371 /**
372 * i7core_get_active_channels() - gets the number of channels and csrows
373 * @socket: Quick Path Interconnect socket
374 * @channels: Number of channels that will be returned
375 * @csrows: Number of csrows found
376 *
377 * Since EDAC core needs to know in advance the number of available channels
378 * and csrows, in order to allocate memory for csrows/channels, it is needed
379 * to run two similar steps. At the first step, implemented on this function,
380 * it checks the number of csrows/channels present at one socket.
381 * this is used in order to properly allocate the size of mci components.
382 *
383 * It should be noticed that none of the current available datasheets explain
384 * or even mention how csrows are seen by the memory controller. So, we need
385 * to add a fake description for csrows.
386 * So, this driver is attributing one DIMM memory for one csrow.
387 */
388 static int i7core_get_active_channels(u8 socket, unsigned *channels,
389 unsigned *csrows)
390 {
391 struct pci_dev *pdev = NULL;
392 int i, j;
393 u32 status, control;
394
395 *channels = 0;
396 *csrows = 0;
397
398 pdev = get_pdev_slot_func(socket, 3, 0);
399 if (!pdev) {
400 i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
401 socket);
402 return -ENODEV;
403 }
404
405 /* Device 3 function 0 reads */
406 pci_read_config_dword(pdev, MC_STATUS, &status);
407 pci_read_config_dword(pdev, MC_CONTROL, &control);
408
409 for (i = 0; i < NUM_CHANS; i++) {
410 u32 dimm_dod[3];
411 /* Check if the channel is active */
412 if (!(control & (1 << (8 + i))))
413 continue;
414
415 /* Check if the channel is disabled */
416 if (status & (1 << i))
417 continue;
418
419 pdev = get_pdev_slot_func(socket, i + 4, 1);
420 if (!pdev) {
421 i7core_printk(KERN_ERR, "Couldn't find socket %d "
422 "fn %d.%d!!!\n",
423 socket, i + 4, 1);
424 return -ENODEV;
425 }
426 /* Devices 4-6 function 1 */
427 pci_read_config_dword(pdev,
428 MC_DOD_CH_DIMM0, &dimm_dod[0]);
429 pci_read_config_dword(pdev,
430 MC_DOD_CH_DIMM1, &dimm_dod[1]);
431 pci_read_config_dword(pdev,
432 MC_DOD_CH_DIMM2, &dimm_dod[2]);
433
434 (*channels)++;
435
436 for (j = 0; j < 3; j++) {
437 if (!DIMM_PRESENT(dimm_dod[j]))
438 continue;
439 (*csrows)++;
440 }
441 }
442
443 debugf0("Number of active channels on socket %d: %d\n",
444 socket, *channels);
445
446 return 0;
447 }
448
449 static int get_dimm_config(struct mem_ctl_info *mci, int *csrow, u8 socket)
450 {
451 struct i7core_pvt *pvt = mci->pvt_info;
452 struct csrow_info *csr;
453 struct pci_dev *pdev;
454 int i, j;
455 unsigned long last_page = 0;
456 enum edac_type mode;
457 enum mem_type mtype;
458
459 /* Get data from the MC register, function 0 */
460 pdev = pvt->pci_mcr[socket][0];
461 if (!pdev)
462 return -ENODEV;
463
464 /* Device 3 function 0 reads */
465 pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
466 pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
467 pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
468 pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
469
470 debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
471 socket, pvt->info.mc_control, pvt->info.mc_status,
472 pvt->info.max_dod, pvt->info.ch_map);
473
474 if (ECC_ENABLED(pvt)) {
475 debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
476 if (ECCx8(pvt))
477 mode = EDAC_S8ECD8ED;
478 else
479 mode = EDAC_S4ECD4ED;
480 } else {
481 debugf0("ECC disabled\n");
482 mode = EDAC_NONE;
483 }
484
485 /* FIXME: need to handle the error codes */
486 debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
487 "x%x x 0x%x\n",
488 numdimms(pvt->info.max_dod),
489 numrank(pvt->info.max_dod >> 2),
490 numbank(pvt->info.max_dod >> 4),
491 numrow(pvt->info.max_dod >> 6),
492 numcol(pvt->info.max_dod >> 9));
493
494 for (i = 0; i < NUM_CHANS; i++) {
495 u32 data, dimm_dod[3], value[8];
496
497 if (!CH_ACTIVE(pvt, i)) {
498 debugf0("Channel %i is not active\n", i);
499 continue;
500 }
501 if (CH_DISABLED(pvt, i)) {
502 debugf0("Channel %i is disabled\n", i);
503 continue;
504 }
505
506 /* Devices 4-6 function 0 */
507 pci_read_config_dword(pvt->pci_ch[socket][i][0],
508 MC_CHANNEL_DIMM_INIT_PARAMS, &data);
509
510 pvt->channel[socket][i].ranks = (data & QUAD_RANK_PRESENT) ?
511 4 : 2;
512
513 if (data & REGISTERED_DIMM)
514 mtype = MEM_RDDR3;
515 else
516 mtype = MEM_DDR3;
517 #if 0
518 if (data & THREE_DIMMS_PRESENT)
519 pvt->channel[i].dimms = 3;
520 else if (data & SINGLE_QUAD_RANK_PRESENT)
521 pvt->channel[i].dimms = 1;
522 else
523 pvt->channel[i].dimms = 2;
524 #endif
525
526 /* Devices 4-6 function 1 */
527 pci_read_config_dword(pvt->pci_ch[socket][i][1],
528 MC_DOD_CH_DIMM0, &dimm_dod[0]);
529 pci_read_config_dword(pvt->pci_ch[socket][i][1],
530 MC_DOD_CH_DIMM1, &dimm_dod[1]);
531 pci_read_config_dword(pvt->pci_ch[socket][i][1],
532 MC_DOD_CH_DIMM2, &dimm_dod[2]);
533
534 debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
535 "%d ranks, %cDIMMs\n",
536 i,
537 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
538 data,
539 pvt->channel[socket][i].ranks,
540 (data & REGISTERED_DIMM) ? 'R' : 'U');
541
542 for (j = 0; j < 3; j++) {
543 u32 banks, ranks, rows, cols;
544 u32 size, npages;
545
546 if (!DIMM_PRESENT(dimm_dod[j]))
547 continue;
548
549 banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
550 ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
551 rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
552 cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
553
554 /* DDR3 has 8 I/O banks */
555 size = (rows * cols * banks * ranks) >> (20 - 3);
556
557 pvt->channel[socket][i].dimms++;
558
559 debugf0("\tdimm %d %d Mb offset: %x, "
560 "bank: %d, rank: %d, row: %#x, col: %#x\n",
561 j, size,
562 RANKOFFSET(dimm_dod[j]),
563 banks, ranks, rows, cols);
564
565 #if PAGE_SHIFT > 20
566 npages = size >> (PAGE_SHIFT - 20);
567 #else
568 npages = size << (20 - PAGE_SHIFT);
569 #endif
570
571 csr = &mci->csrows[*csrow];
572 csr->first_page = last_page + 1;
573 last_page += npages;
574 csr->last_page = last_page;
575 csr->nr_pages = npages;
576
577 csr->page_mask = 0;
578 csr->grain = 8;
579 csr->csrow_idx = *csrow;
580 csr->nr_channels = 1;
581
582 csr->channels[0].chan_idx = i;
583 csr->channels[0].ce_count = 0;
584
585 pvt->csrow_map[socket][i][j] = *csrow;
586
587 switch (banks) {
588 case 4:
589 csr->dtype = DEV_X4;
590 break;
591 case 8:
592 csr->dtype = DEV_X8;
593 break;
594 case 16:
595 csr->dtype = DEV_X16;
596 break;
597 default:
598 csr->dtype = DEV_UNKNOWN;
599 }
600
601 csr->edac_mode = mode;
602 csr->mtype = mtype;
603
604 (*csrow)++;
605 }
606
607 pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
608 pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
609 pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
610 pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
611 pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
612 pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
613 pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
614 pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
615 debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
616 for (j = 0; j < 8; j++)
617 debugf1("\t\t%#x\t%#x\t%#x\n",
618 (value[j] >> 27) & 0x1,
619 (value[j] >> 24) & 0x7,
620 (value[j] && ((1 << 24) - 1)));
621 }
622
623 return 0;
624 }
625
626 /****************************************************************************
627 Error insertion routines
628 ****************************************************************************/
629
630 /* The i7core has independent error injection features per channel.
631 However, to have a simpler code, we don't allow enabling error injection
632 on more than one channel.
633 Also, since a change at an inject parameter will be applied only at enable,
634 we're disabling error injection on all write calls to the sysfs nodes that
635 controls the error code injection.
636 */
637 static int disable_inject(struct mem_ctl_info *mci)
638 {
639 struct i7core_pvt *pvt = mci->pvt_info;
640
641 pvt->inject.enable = 0;
642
643 if (!pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0])
644 return -ENODEV;
645
646 pci_write_config_dword(pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0],
647 MC_CHANNEL_ERROR_INJECT, 0);
648
649 return 0;
650 }
651
652 /*
653 * i7core inject inject.socket
654 *
655 * accept and store error injection inject.socket value
656 */
657 static ssize_t i7core_inject_socket_store(struct mem_ctl_info *mci,
658 const char *data, size_t count)
659 {
660 struct i7core_pvt *pvt = mci->pvt_info;
661 unsigned long value;
662 int rc;
663
664 rc = strict_strtoul(data, 10, &value);
665 if ((rc < 0) || (value >= pvt->sockets))
666 return -EIO;
667
668 pvt->inject.socket = (u32) value;
669 return count;
670 }
671
672 static ssize_t i7core_inject_socket_show(struct mem_ctl_info *mci,
673 char *data)
674 {
675 struct i7core_pvt *pvt = mci->pvt_info;
676 return sprintf(data, "%d\n", pvt->inject.socket);
677 }
678
679 /*
680 * i7core inject inject.section
681 *
682 * accept and store error injection inject.section value
683 * bit 0 - refers to the lower 32-byte half cacheline
684 * bit 1 - refers to the upper 32-byte half cacheline
685 */
686 static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
687 const char *data, size_t count)
688 {
689 struct i7core_pvt *pvt = mci->pvt_info;
690 unsigned long value;
691 int rc;
692
693 if (pvt->inject.enable)
694 disable_inject(mci);
695
696 rc = strict_strtoul(data, 10, &value);
697 if ((rc < 0) || (value > 3))
698 return -EIO;
699
700 pvt->inject.section = (u32) value;
701 return count;
702 }
703
704 static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
705 char *data)
706 {
707 struct i7core_pvt *pvt = mci->pvt_info;
708 return sprintf(data, "0x%08x\n", pvt->inject.section);
709 }
710
711 /*
712 * i7core inject.type
713 *
714 * accept and store error injection inject.section value
715 * bit 0 - repeat enable - Enable error repetition
716 * bit 1 - inject ECC error
717 * bit 2 - inject parity error
718 */
719 static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
720 const char *data, size_t count)
721 {
722 struct i7core_pvt *pvt = mci->pvt_info;
723 unsigned long value;
724 int rc;
725
726 if (pvt->inject.enable)
727 disable_inject(mci);
728
729 rc = strict_strtoul(data, 10, &value);
730 if ((rc < 0) || (value > 7))
731 return -EIO;
732
733 pvt->inject.type = (u32) value;
734 return count;
735 }
736
737 static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
738 char *data)
739 {
740 struct i7core_pvt *pvt = mci->pvt_info;
741 return sprintf(data, "0x%08x\n", pvt->inject.type);
742 }
743
744 /*
745 * i7core_inject_inject.eccmask_store
746 *
747 * The type of error (UE/CE) will depend on the inject.eccmask value:
748 * Any bits set to a 1 will flip the corresponding ECC bit
749 * Correctable errors can be injected by flipping 1 bit or the bits within
750 * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
751 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
752 * uncorrectable error to be injected.
753 */
754 static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
755 const char *data, size_t count)
756 {
757 struct i7core_pvt *pvt = mci->pvt_info;
758 unsigned long value;
759 int rc;
760
761 if (pvt->inject.enable)
762 disable_inject(mci);
763
764 rc = strict_strtoul(data, 10, &value);
765 if (rc < 0)
766 return -EIO;
767
768 pvt->inject.eccmask = (u32) value;
769 return count;
770 }
771
772 static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
773 char *data)
774 {
775 struct i7core_pvt *pvt = mci->pvt_info;
776 return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
777 }
778
779 /*
780 * i7core_addrmatch
781 *
782 * The type of error (UE/CE) will depend on the inject.eccmask value:
783 * Any bits set to a 1 will flip the corresponding ECC bit
784 * Correctable errors can be injected by flipping 1 bit or the bits within
785 * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
786 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
787 * uncorrectable error to be injected.
788 */
/*
 * Parse an address-match specification of the form
 *	field:value [field:value ...]
 * where field is one of channel, dimm, rank, bank, page, col/column and
 * value is a decimal number or "any" (stored as -1, meaning wildcard).
 *
 * NOTE(review): on a parse error this returns "cmd - data", a pointer
 * difference that may be zero or negative rather than a proper -E* code —
 * confirm this is the intended sysfs error convention.
 */
static ssize_t i7core_inject_addrmatch_store(struct mem_ctl_info *mci,
					     const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	char *cmd, *val;
	long value;
	int rc;

	/* Any parameter change disarms a pending injection first */
	if (pvt->inject.enable)
		disable_inject(mci);

	do {
		/* strsep() advances data past each "field:value" token */
		cmd = strsep((char **) &data, ":");
		if (!cmd)
			break;
		val = strsep((char **) &data, " \n\t");
		if (!val)
			return cmd - data;

		if (!strcasecmp(val, "any"))
			value = -1;
		else {
			rc = strict_strtol(val, 10, &value);
			if ((rc < 0) || (value < 0))
				return cmd - data;
		}

		/* Range-check each field before storing it */
		if (!strcasecmp(cmd, "channel")) {
			if (value < 3)
				pvt->inject.channel = value;
			else
				return cmd - data;
		} else if (!strcasecmp(cmd, "dimm")) {
			if (value < 3)
				pvt->inject.dimm = value;
			else
				return cmd - data;
		} else if (!strcasecmp(cmd, "rank")) {
			if (value < 4)
				pvt->inject.rank = value;
			else
				return cmd - data;
		} else if (!strcasecmp(cmd, "bank")) {
			if (value < 32)
				pvt->inject.bank = value;
			else
				return cmd - data;
		} else if (!strcasecmp(cmd, "page")) {
			if (value <= 0xffff)
				pvt->inject.page = value;
			else
				return cmd - data;
		} else if (!strcasecmp(cmd, "col") ||
			   !strcasecmp(cmd, "column")) {
			if (value <= 0x3fff)
				pvt->inject.col = value;
			else
				return cmd - data;
		}
	} while (1);

	return count;
}
852
/*
 * Print the current address-match criteria, one "field: value" line per
 * field; -1 (wildcard) is shown as "any".  The temporary buffers are
 * sized for the largest value each field can hold per the range checks
 * in i7core_inject_addrmatch_store() (e.g. page "0x%04x" needs 7 bytes).
 */
static ssize_t i7core_inject_addrmatch_show(struct mem_ctl_info *mci,
					    char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	char channel[4], dimm[4], bank[4], rank[4], page[7], col[7];

	if (pvt->inject.channel < 0)
		sprintf(channel, "any");
	else
		sprintf(channel, "%d", pvt->inject.channel);
	if (pvt->inject.dimm < 0)
		sprintf(dimm, "any");
	else
		sprintf(dimm, "%d", pvt->inject.dimm);
	if (pvt->inject.bank < 0)
		sprintf(bank, "any");
	else
		sprintf(bank, "%d", pvt->inject.bank);
	if (pvt->inject.rank < 0)
		sprintf(rank, "any");
	else
		sprintf(rank, "%d", pvt->inject.rank);
	if (pvt->inject.page < 0)
		sprintf(page, "any");
	else
		sprintf(page, "0x%04x", pvt->inject.page);
	if (pvt->inject.col < 0)
		sprintf(col, "any");
	else
		sprintf(col, "0x%04x", pvt->inject.col);

	return sprintf(data, "channel: %s\ndimm: %s\nbank: %s\n"
		       "rank: %s\npage: %s\ncolumn: %s\n",
		       channel, dimm, bank, rank, page, col);
}
888
889 static int write_and_test(struct pci_dev *dev, int where, u32 val)
890 {
891 u32 read;
892 int count;
893
894 debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
895 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
896 where, val);
897
898 for (count = 0; count < 10; count++) {
899 if (count)
900 msleep(100);
901 pci_write_config_dword(dev, where, val);
902 pci_read_config_dword(dev, where, &read);
903
904 if (read == val)
905 return 0;
906 }
907
908 i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
909 "write=%08x. Read=%08x\n",
910 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
911 where, val, read);
912
913 return -EINVAL;
914 }
915
916 /*
917 * This routine prepares the Memory Controller for error injection.
918 * The error will be injected when some process tries to write to the
919 * memory that matches the given criteria.
920 * The criteria can be set in terms of a mask where dimm, rank, bank, page
921 * and col can be specified.
922 * A -1 value for any of the mask items will make the MCU to ignore
923 * that matching criteria for error injection.
924 *
925 * It should be noticed that the error will only happen after a write operation
926 * on a memory that matches the condition. if REPEAT_EN is not enabled at
927 * inject mask, then it will produce just one error. Otherwise, it will repeat
928 * until the injectmask would be cleaned.
929 *
930 * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD
931 * is reliable enough to check if the MC is using the
932 * three channels. However, this is not clear at the datasheet.
933 */
934 static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
935 const char *data, size_t count)
936 {
937 struct i7core_pvt *pvt = mci->pvt_info;
938 u32 injectmask;
939 u64 mask = 0;
940 int rc;
941 long enable;
942
943 if (!pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0])
944 return 0;
945
946 rc = strict_strtoul(data, 10, &enable);
947 if ((rc < 0))
948 return 0;
949
950 if (enable) {
951 pvt->inject.enable = 1;
952 } else {
953 disable_inject(mci);
954 return count;
955 }
956
957 /* Sets pvt->inject.dimm mask */
958 if (pvt->inject.dimm < 0)
959 mask |= 1L << 41;
960 else {
961 if (pvt->channel[pvt->inject.socket][pvt->inject.channel].dimms > 2)
962 mask |= (pvt->inject.dimm & 0x3L) << 35;
963 else
964 mask |= (pvt->inject.dimm & 0x1L) << 36;
965 }
966
967 /* Sets pvt->inject.rank mask */
968 if (pvt->inject.rank < 0)
969 mask |= 1L << 40;
970 else {
971 if (pvt->channel[pvt->inject.socket][pvt->inject.channel].dimms > 2)
972 mask |= (pvt->inject.rank & 0x1L) << 34;
973 else
974 mask |= (pvt->inject.rank & 0x3L) << 34;
975 }
976
977 /* Sets pvt->inject.bank mask */
978 if (pvt->inject.bank < 0)
979 mask |= 1L << 39;
980 else
981 mask |= (pvt->inject.bank & 0x15L) << 30;
982
983 /* Sets pvt->inject.page mask */
984 if (pvt->inject.page < 0)
985 mask |= 1L << 38;
986 else
987 mask |= (pvt->inject.page & 0xffffL) << 14;
988
989 /* Sets pvt->inject.column mask */
990 if (pvt->inject.col < 0)
991 mask |= 1L << 37;
992 else
993 mask |= (pvt->inject.col & 0x3fffL);
994
995 /*
996 * bit 0: REPEAT_EN
997 * bits 1-2: MASK_HALF_CACHELINE
998 * bit 3: INJECT_ECC
999 * bit 4: INJECT_ADDR_PARITY
1000 */
1001
1002 injectmask = (pvt->inject.type & 1) |
1003 (pvt->inject.section & 0x3) << 1 |
1004 (pvt->inject.type & 0x6) << (3 - 1);
1005
1006 /* Unlock writes to registers - this register is write only */
1007 pci_write_config_dword(pvt->pci_noncore[pvt->inject.socket],
1008 MC_CFG_CONTROL, 0x2);
1009
1010 write_and_test(pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0],
1011 MC_CHANNEL_ADDR_MATCH, mask);
1012 write_and_test(pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0],
1013 MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);
1014
1015 write_and_test(pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0],
1016 MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);
1017
1018 write_and_test(pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0],
1019 MC_CHANNEL_ERROR_INJECT, injectmask);
1020
1021 /*
1022 * This is something undocumented, based on my tests
1023 * Without writing 8 to this register, errors aren't injected. Not sure
1024 * why.
1025 */
1026 pci_write_config_dword(pvt->pci_noncore[pvt->inject.socket],
1027 MC_CFG_CONTROL, 8);
1028
1029 debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
1030 " inject 0x%08x\n",
1031 mask, pvt->inject.eccmask, injectmask);
1032
1033
1034 return count;
1035 }
1036
1037 static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
1038 char *data)
1039 {
1040 struct i7core_pvt *pvt = mci->pvt_info;
1041 u32 injectmask;
1042
1043 pci_read_config_dword(pvt->pci_ch[pvt->inject.socket][pvt->inject.channel][0],
1044 MC_CHANNEL_ERROR_INJECT, &injectmask);
1045
1046 debugf0("Inject error read: 0x%018x\n", injectmask);
1047
1048 if (injectmask & 0x0c)
1049 pvt->inject.enable = 1;
1050
1051 return sprintf(data, "%d\n", pvt->inject.enable);
1052 }
1053
1054 static ssize_t i7core_ce_regs_show(struct mem_ctl_info *mci, char *data)
1055 {
1056 unsigned i, j, count, total = 0;
1057 struct i7core_pvt *pvt = mci->pvt_info;
1058
1059 for (i = 0; i < pvt->sockets; i++) {
1060 if (!pvt->ce_count_available[i]) {
1061 count = sprintf(data, "socket 0 data unavailable\n");
1062 continue;
1063 }
1064 if (!pvt->is_registered[i])
1065 count = sprintf(data, "socket %d, dimm0: %lu\n"
1066 "dimm1: %lu\ndimm2: %lu\n",
1067 i,
1068 pvt->udimm_ce_count[i][0],
1069 pvt->udimm_ce_count[i][1],
1070 pvt->udimm_ce_count[i][2]);
1071 else
1072 for (j = 0; j < NUM_CHANS; j++) {
1073 count = sprintf(data, "socket %d, channel %d "
1074 "RDIMM0: %lu "
1075 "RDIMM1: %lu RDIMM2: %lu\n",
1076 i, j,
1077 pvt->rdimm_ce_count[i][j][0],
1078 pvt->rdimm_ce_count[i][j][1],
1079 pvt->rdimm_ce_count[i][j][2]);
1080 }
1081 data += count;
1082 total += count;
1083 }
1084
1085 return total;
1086 }
1087
1088 /*
1089 * Sysfs struct
1090 */
/* Sysfs attributes exported under the mci directory; each maps a node
   name to the show/store handlers defined above. */
static struct mcidev_sysfs_attribute i7core_inj_attrs[] = {
	{
		.attr = {
			.name = "inject_socket",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_socket_show,
		.store = i7core_inject_socket_store,
	}, {
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_section_show,
		.store = i7core_inject_section_store,
	}, {
		.attr = {
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_type_show,
		.store = i7core_inject_type_store,
	}, {
		.attr = {
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
	}, {
		.attr = {
			.name = "inject_addrmatch",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_addrmatch_show,
		.store = i7core_inject_addrmatch_store,
	}, {
		.attr = {
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	}, {
		.attr = {
			.name = "corrected_error_counts",
			/* NOTE(review): S_IWUSR with a NULL .store — the
			   write bit is probably unintended */
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_ce_regs_show,
		.store = NULL,
	},
};
1143
1144 /****************************************************************************
1145 Device initialization routines: put/get, init/exit
1146 ****************************************************************************/
1147
1148 /*
1149 * i7core_put_devices 'put' all the devices that we have
1150 * reserved via 'get'
1151 */
1152 static void i7core_put_devices(void)
1153 {
1154 int i, j;
1155
1156 for (i = 0; i < NUM_SOCKETS; i++)
1157 for (j = 0; j < N_DEVS; j++)
1158 pci_dev_put(pci_devs[j].pdev[i]);
1159 }
1160
1161 static void i7core_xeon_pci_fixup(void)
1162 {
1163 struct pci_dev *pdev = NULL;
1164 int i;
1165 /*
1166 * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core pci buses
1167 * aren't announced by acpi. So, we need to use a legacy scan probing
1168 * to detect them
1169 */
1170 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1171 pci_devs[0].dev_id, NULL);
1172 if (unlikely(!pdev)) {
1173 for (i = 0; i < NUM_SOCKETS; i ++)
1174 pcibios_scan_specific_bus(255-i);
1175 }
1176 }
1177
1178 /*
1179 * i7core_get_devices Find and perform 'get' operation on the MCH's
1180 * device/functions we want to reference for this driver
1181 *
1182 * Need to 'get' device 16 func 1 and func 2
1183 */
1184 int i7core_get_onedevice(struct pci_dev **prev, int devno)
1185 {
1186 struct pci_dev *pdev = NULL;
1187 u8 bus = 0;
1188 u8 socket = 0;
1189
1190 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1191 pci_devs[devno].dev_id, *prev);
1192
1193 /*
1194 * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core regs
1195 * is at addr 8086:2c40, instead of 8086:2c41. So, we need
1196 * to probe for the alternate address in case of failure
1197 */
1198 if (pci_devs[devno].dev_id == PCI_DEVICE_ID_INTEL_I7_NOCORE && !pdev)
1199 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1200 PCI_DEVICE_ID_INTEL_I7_NOCORE_ALT, *prev);
1201
1202 if (!pdev) {
1203 if (*prev) {
1204 *prev = pdev;
1205 return 0;
1206 }
1207
1208 /*
1209 * Dev 3 function 2 only exists on chips with RDIMMs
1210 * so, it is ok to not found it
1211 */
1212 if ((pci_devs[devno].dev == 3) && (pci_devs[devno].func == 2)) {
1213 *prev = pdev;
1214 return 0;
1215 }
1216
1217 i7core_printk(KERN_ERR,
1218 "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
1219 pci_devs[devno].dev, pci_devs[devno].func,
1220 PCI_VENDOR_ID_INTEL, pci_devs[devno].dev_id);
1221
1222 /* End of list, leave */
1223 return -ENODEV;
1224 }
1225 bus = pdev->bus->number;
1226
1227 if (bus == 0x3f)
1228 socket = 0;
1229 else
1230 socket = 255 - bus;
1231
1232 if (socket >= NUM_SOCKETS) {
1233 i7core_printk(KERN_ERR,
1234 "Unexpected socket for "
1235 "dev %02x:%02x.%d PCI ID %04x:%04x\n",
1236 bus, pci_devs[devno].dev, pci_devs[devno].func,
1237 PCI_VENDOR_ID_INTEL, pci_devs[devno].dev_id);
1238 pci_dev_put(pdev);
1239 return -ENODEV;
1240 }
1241
1242 if (pci_devs[devno].pdev[socket]) {
1243 i7core_printk(KERN_ERR,
1244 "Duplicated device for "
1245 "dev %02x:%02x.%d PCI ID %04x:%04x\n",
1246 bus, pci_devs[devno].dev, pci_devs[devno].func,
1247 PCI_VENDOR_ID_INTEL, pci_devs[devno].dev_id);
1248 pci_dev_put(pdev);
1249 return -ENODEV;
1250 }
1251
1252 pci_devs[devno].pdev[socket] = pdev;
1253
1254 /* Sanity check */
1255 if (unlikely(PCI_SLOT(pdev->devfn) != pci_devs[devno].dev ||
1256 PCI_FUNC(pdev->devfn) != pci_devs[devno].func)) {
1257 i7core_printk(KERN_ERR,
1258 "Device PCI ID %04x:%04x "
1259 "has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
1260 PCI_VENDOR_ID_INTEL, pci_devs[devno].dev_id,
1261 bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1262 bus, pci_devs[devno].dev, pci_devs[devno].func);
1263 return -ENODEV;
1264 }
1265
1266 /* Be sure that the device is enabled */
1267 if (unlikely(pci_enable_device(pdev) < 0)) {
1268 i7core_printk(KERN_ERR,
1269 "Couldn't enable "
1270 "dev %02x:%02x.%d PCI ID %04x:%04x\n",
1271 bus, pci_devs[devno].dev, pci_devs[devno].func,
1272 PCI_VENDOR_ID_INTEL, pci_devs[devno].dev_id);
1273 return -ENODEV;
1274 }
1275
1276 i7core_printk(KERN_INFO,
1277 "Registered socket %d "
1278 "dev %02x:%02x.%d PCI ID %04x:%04x\n",
1279 socket, bus, pci_devs[devno].dev, pci_devs[devno].func,
1280 PCI_VENDOR_ID_INTEL, pci_devs[devno].dev_id);
1281
1282 *prev = pdev;
1283
1284 return 0;
1285 }
1286
1287 static int i7core_get_devices(void)
1288 {
1289 int i;
1290 struct pci_dev *pdev = NULL;
1291
1292 for (i = 0; i < N_DEVS; i++) {
1293 pdev = NULL;
1294 do {
1295 if (i7core_get_onedevice(&pdev, i) < 0) {
1296 i7core_put_devices();
1297 return -ENODEV;
1298 }
1299 } while (pdev);
1300 }
1301 return 0;
1302 }
1303
1304 static int mci_bind_devs(struct mem_ctl_info *mci)
1305 {
1306 struct i7core_pvt *pvt = mci->pvt_info;
1307 struct pci_dev *pdev;
1308 int i, j, func, slot;
1309
1310
1311 for (i = 0; i < pvt->sockets; i++) {
1312 pvt->is_registered[i] = 0;
1313 for (j = 0; j < N_DEVS; j++) {
1314 pdev = pci_devs[j].pdev[i];
1315 if (!pdev)
1316 continue;
1317
1318 func = PCI_FUNC(pdev->devfn);
1319 slot = PCI_SLOT(pdev->devfn);
1320 if (slot == 3) {
1321 if (unlikely(func > MAX_MCR_FUNC))
1322 goto error;
1323 pvt->pci_mcr[i][func] = pdev;
1324 } else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
1325 if (unlikely(func > MAX_CHAN_FUNC))
1326 goto error;
1327 pvt->pci_ch[i][slot - 4][func] = pdev;
1328 } else if (!slot && !func)
1329 pvt->pci_noncore[i] = pdev;
1330 else
1331 goto error;
1332
1333 debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
1334 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1335 pdev, i);
1336
1337 if (PCI_SLOT(pdev->devfn) == 3 &&
1338 PCI_FUNC(pdev->devfn) == 2)
1339 pvt->is_registered[i] = 1;
1340 }
1341 }
1342
1343 return 0;
1344
1345 error:
1346 i7core_printk(KERN_ERR, "Device %d, function %d "
1347 "is out of the expected range\n",
1348 slot, func);
1349 return -EINVAL;
1350 }
1351
1352 /****************************************************************************
1353 Error check routines
1354 ****************************************************************************/
1355 static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci, int socket,
1356 int chan, int dimm, int add)
1357 {
1358 char *msg;
1359 struct i7core_pvt *pvt = mci->pvt_info;
1360 int row = pvt->csrow_map[socket][chan][dimm], i;
1361
1362 for (i = 0; i < add; i++) {
1363 msg = kasprintf(GFP_KERNEL, "Corrected error "
1364 "(Socket=%d channel=%d dimm=%d",
1365 socket, chan, dimm);
1366
1367 edac_mc_handle_fbd_ce(mci, row, 0, msg);
1368 kfree (msg);
1369 }
1370 }
1371
1372 static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
1373 int socket, int chan, int new0, int new1, int new2)
1374 {
1375 struct i7core_pvt *pvt = mci->pvt_info;
1376 int add0 = 0, add1 = 0, add2 = 0;
1377 /* Updates CE counters if it is not the first time here */
1378 if (pvt->ce_count_available[socket]) {
1379 /* Updates CE counters */
1380
1381 add2 = new2 - pvt->rdimm_last_ce_count[socket][chan][2];
1382 add1 = new1 - pvt->rdimm_last_ce_count[socket][chan][1];
1383 add0 = new0 - pvt->rdimm_last_ce_count[socket][chan][0];
1384
1385 if (add2 < 0)
1386 add2 += 0x7fff;
1387 pvt->rdimm_ce_count[socket][chan][2] += add2;
1388
1389 if (add1 < 0)
1390 add1 += 0x7fff;
1391 pvt->rdimm_ce_count[socket][chan][1] += add1;
1392
1393 if (add0 < 0)
1394 add0 += 0x7fff;
1395 pvt->rdimm_ce_count[socket][chan][0] += add0;
1396 } else
1397 pvt->ce_count_available[socket] = 1;
1398
1399 /* Store the new values */
1400 pvt->rdimm_last_ce_count[socket][chan][2] = new2;
1401 pvt->rdimm_last_ce_count[socket][chan][1] = new1;
1402 pvt->rdimm_last_ce_count[socket][chan][0] = new0;
1403
1404 /*updated the edac core */
1405 if (add0 != 0)
1406 i7core_rdimm_update_csrow(mci, socket, chan, 0, add0);
1407 if (add1 != 0)
1408 i7core_rdimm_update_csrow(mci, socket, chan, 1, add1);
1409 if (add2 != 0)
1410 i7core_rdimm_update_csrow(mci, socket, chan, 2, add2);
1411
1412 }
1413
/*
 * i7core_rdimm_check_mc_ecc_err()	Poll the registered-DIMM corrected
 *					error counters for one socket.
 *
 * Reads the six MC_COR_ECC_CNT registers from dev 3 func 2 and derives
 * three per-dimm counts for each of the three channels, then hands them
 * to i7core_rdimm_update_ce_count() for delta accounting.
 */
static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci, u8 socket)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv[3][2];
	int i, new0, new1, new2;

	/*Read DEV 3: FUN 2:  MC_COR_ECC_CNT regs directly*/
	pci_read_config_dword(pvt->pci_mcr[socket][2], MC_COR_ECC_CNT_0,
			      &rcv[0][0]);
	pci_read_config_dword(pvt->pci_mcr[socket][2], MC_COR_ECC_CNT_1,
			      &rcv[0][1]);
	pci_read_config_dword(pvt->pci_mcr[socket][2], MC_COR_ECC_CNT_2,
			      &rcv[1][0]);
	pci_read_config_dword(pvt->pci_mcr[socket][2], MC_COR_ECC_CNT_3,
			      &rcv[1][1]);
	pci_read_config_dword(pvt->pci_mcr[socket][2], MC_COR_ECC_CNT_4,
			      &rcv[2][0]);
	pci_read_config_dword(pvt->pci_mcr[socket][2], MC_COR_ECC_CNT_5,
			      &rcv[2][1]);
	for (i = 0 ; i < 3; i++) {
		debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
			(i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
		/*if the channel has 3 dimms*/
		if (pvt->channel[socket][i].dimms > 2) {
			/* 3-dimm channel: counters map 1:1 to dimms */
			new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
			new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
		} else {
			/*
			 * 1-2 dimm channel: each dimm uses a top+bottom
			 * counter pair, so sum the halves per dimm
			 */
			new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
				DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
				DIMM_BOT_COR_ERR(rcv[i][1]);
			new2 = 0;
		}

		i7core_rdimm_update_ce_count(mci, socket, i, new0, new1, new2);
	}
}
1452
1453 /* This function is based on the device 3 function 4 registers as described on:
1454 * Intel Xeon Processor 5500 Series Datasheet Volume 2
1455 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
1456 * also available at:
1457 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
1458 */
/*
 * i7core_udimm_check_mc_ecc_err()	Poll the unregistered-DIMM corrected
 *					error counters for one socket and
 *					update the running totals.
 */
static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci, u8 socket)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv1, rcv0;
	int new0, new1, new2;

	/* Dev 3 func 4 may be absent on some configurations */
	if (!pvt->pci_mcr[socket][4]) {
		debugf0("%s MCR registers not found\n", __func__);
		return;
	}

	/* Corrected test errors */
	pci_read_config_dword(pvt->pci_mcr[socket][4], MC_TEST_ERR_RCV1, &rcv1);
	pci_read_config_dword(pvt->pci_mcr[socket][4], MC_TEST_ERR_RCV0, &rcv0);

	/* Store the new values */
	new2 = DIMM2_COR_ERR(rcv1);
	new1 = DIMM1_COR_ERR(rcv0);
	new0 = DIMM0_COR_ERR(rcv0);

	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available[socket]) {
		/* Updates CE counters */
		int add0, add1, add2;

		add2 = new2 - pvt->udimm_last_ce_count[socket][2];
		add1 = new1 - pvt->udimm_last_ce_count[socket][1];
		add0 = new0 - pvt->udimm_last_ce_count[socket][0];

		/*
		 * A negative delta means the 15-bit hardware counter
		 * wrapped around, hence the 0x7fff compensation.
		 */
		if (add2 < 0)
			add2 += 0x7fff;
		pvt->udimm_ce_count[socket][2] += add2;

		if (add1 < 0)
			add1 += 0x7fff;
		pvt->udimm_ce_count[socket][1] += add1;

		if (add0 < 0)
			add0 += 0x7fff;
		pvt->udimm_ce_count[socket][0] += add0;

		if (add0 | add1 | add2)
			i7core_printk(KERN_ERR, "New Corrected error(s): "
				      "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
				      add0, add1, add2);
	} else
		pvt->ce_count_available[socket] = 1;

	/* Store the new values */
	pvt->udimm_last_ce_count[socket][2] = new2;
	pvt->udimm_last_ce_count[socket][1] = new1;
	pvt->udimm_last_ce_count[socket][0] = new0;
}
1512
1513 /*
1514 * According with tables E-11 and E-12 of chapter E.3.3 of Intel 64 and IA-32
1515 * Architectures Software Developer’s Manual Volume 3B.
1516 * Nehalem are defined as family 0x06, model 0x1a
1517 *
1518 * The MCA registers used here are the following ones:
1519 * struct mce field MCA Register
1520 * m->status MSR_IA32_MC8_STATUS
1521 * m->addr MSR_IA32_MC8_ADDR
1522 * m->misc MSR_IA32_MC8_MISC
1523 * In the case of Nehalem, the error information is masked at .status and .misc
1524 * fields
1525 */
1526 static void i7core_mce_output_error(struct mem_ctl_info *mci,
1527 struct mce *m)
1528 {
1529 struct i7core_pvt *pvt = mci->pvt_info;
1530 char *type, *optype, *err, *msg;
1531 unsigned long error = m->status & 0x1ff0000l;
1532 u32 optypenum = (m->status >> 4) & 0x07;
1533 u32 core_err_cnt = (m->status >> 38) && 0x7fff;
1534 u32 dimm = (m->misc >> 16) & 0x3;
1535 u32 channel = (m->misc >> 18) & 0x3;
1536 u32 syndrome = m->misc >> 32;
1537 u32 errnum = find_first_bit(&error, 32);
1538 int csrow;
1539 /* FIXME */
1540 //#ifdef CONFIG_SMP
1541 #if 0
1542 u32 socket_id = per_cpu(cpu_data, cpu).phys_proc_id;
1543 #else
1544 u32 socket_id = 0;
1545 #endif
1546
1547 if (m->mcgstatus & 1)
1548 type = "FATAL";
1549 else
1550 type = "NON_FATAL";
1551
1552 switch (optypenum) {
1553 case 0:
1554 optype = "generic undef request";
1555 break;
1556 case 1:
1557 optype = "read error";
1558 break;
1559 case 2:
1560 optype = "write error";
1561 break;
1562 case 3:
1563 optype = "addr/cmd error";
1564 break;
1565 case 4:
1566 optype = "scrubbing error";
1567 break;
1568 default:
1569 optype = "reserved";
1570 break;
1571 }
1572
1573 switch (errnum) {
1574 case 16:
1575 err = "read ECC error";
1576 break;
1577 case 17:
1578 err = "RAS ECC error";
1579 break;
1580 case 18:
1581 err = "write parity error";
1582 break;
1583 case 19:
1584 err = "redundacy loss";
1585 break;
1586 case 20:
1587 err = "reserved";
1588 break;
1589 case 21:
1590 err = "memory range error";
1591 break;
1592 case 22:
1593 err = "RTID out of range";
1594 break;
1595 case 23:
1596 err = "address parity error";
1597 break;
1598 case 24:
1599 err = "byte enable parity error";
1600 break;
1601 default:
1602 err = "unknown";
1603 }
1604
1605 /* FIXME: should convert addr into bank and rank information */
1606 msg = kasprintf(GFP_ATOMIC,
1607 "%s (addr = 0x%08llx, socket=%d, Dimm=%d, Channel=%d, "
1608 "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
1609 type, (long long) m->addr, socket_id, dimm, channel,
1610 syndrome, core_err_cnt, (long long)m->status,
1611 (long long)m->misc, optype, err);
1612
1613 debugf0("%s", msg);
1614
1615 if (socket_id < NUM_SOCKETS)
1616 csrow = pvt->csrow_map[socket_id][channel][dimm];
1617 else
1618 csrow = -1;
1619
1620 /* Call the helper to output message */
1621 if (m->mcgstatus & 1)
1622 edac_mc_handle_fbd_ue(mci, csrow, 0,
1623 0 /* FIXME: should be channel here */, msg);
1624 else if (!pvt->is_registered[socket_id])
1625 edac_mc_handle_fbd_ce(mci, csrow,
1626 0 /* FIXME: should be channel here */, msg);
1627
1628 kfree(msg);
1629 }
1630
1631 /*
1632 * i7core_check_error Retrieve and process errors reported by the
1633 * hardware. Called by the Core module.
1634 */
static void i7core_check_error(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int i;
	unsigned count = 0;
	struct mce *m = NULL;
	unsigned long flags;

	/* Copy all mce errors into a temporary buffer */
	spin_lock_irqsave(&pvt->mce_lock, flags);
	if (pvt->mce_count) {
		/* GFP_ATOMIC: allocating under a spinlock with IRQs off */
		m = kmalloc(sizeof(*m) * pvt->mce_count, GFP_ATOMIC);
		if (m) {
			count = pvt->mce_count;
			memcpy(m, &pvt->mce_entry, sizeof(*m) * count);
		}
		/*
		 * The queue is reset even when the allocation failed, so
		 * it does not fill up with stale entries; those errors
		 * are dropped in that case.
		 */
		pvt->mce_count = 0;
	}
	spin_unlock_irqrestore(&pvt->mce_lock, flags);

	/* process mcelog errors outside the lock */
	for (i = 0; i < count; i++)
		i7core_mce_output_error(mci, &m[i]);

	kfree(m);

	/* check memory count errors, per socket and DIMM type */
	for (i = 0; i < pvt->sockets; i++)
		if (!pvt->is_registered[i])
			i7core_udimm_check_mc_ecc_err(mci, i);
		else
			i7core_rdimm_check_mc_ecc_err(mci, i);
}
1668
1669 /*
1670 * i7core_mce_check_error Replicates mcelog routine to get errors
1671 * This routine simply queues mcelog errors, and
1672 * return. The error itself should be handled later
1673 * by i7core_check_error.
1674 */
1675 static int i7core_mce_check_error(void *priv, struct mce *mce)
1676 {
1677 struct mem_ctl_info *mci = priv;
1678 struct i7core_pvt *pvt = mci->pvt_info;
1679 unsigned long flags;
1680
1681 /*
1682 * Just let mcelog handle it if the error is
1683 * outside the memory controller
1684 */
1685 if (((mce->status & 0xffff) >> 7) != 1)
1686 return 0;
1687
1688 /* Bank 8 registers are the only ones that we know how to handle */
1689 if (mce->bank != 8)
1690 return 0;
1691
1692 spin_lock_irqsave(&pvt->mce_lock, flags);
1693 if (pvt->mce_count < MCE_LOG_LEN) {
1694 memcpy(&pvt->mce_entry[pvt->mce_count], mce, sizeof(*mce));
1695 pvt->mce_count++;
1696 }
1697 spin_unlock_irqrestore(&pvt->mce_lock, flags);
1698
1699 /* Handle fatal errors immediately */
1700 if (mce->mcgstatus & 1)
1701 i7core_check_error(mci);
1702
1703 /* Advice mcelog that the error were handled */
1704 return 1;
1705 }
1706
1707 /*
1708 * i7core_probe Probe for ONE instance of device to see if it is
1709 * present.
1710 * return:
1711 * 0 for FOUND a device
1712 * < 0 for error code
1713 */
1714 static int __devinit i7core_probe(struct pci_dev *pdev,
1715 const struct pci_device_id *id)
1716 {
1717 struct mem_ctl_info *mci;
1718 struct i7core_pvt *pvt;
1719 int num_channels = 0;
1720 int num_csrows = 0;
1721 int csrow = 0;
1722 int dev_idx = id->driver_data;
1723 int rc, i;
1724 u8 sockets;
1725
1726 if (unlikely(dev_idx >= ARRAY_SIZE(i7core_devs)))
1727 return -EINVAL;
1728
1729 /* get the pci devices we want to reserve for our use */
1730 rc = i7core_get_devices();
1731 if (unlikely(rc < 0))
1732 return rc;
1733
1734 sockets = 1;
1735 for (i = NUM_SOCKETS - 1; i > 0; i--)
1736 if (pci_devs[0].pdev[i]) {
1737 sockets = i + 1;
1738 break;
1739 }
1740
1741 for (i = 0; i < sockets; i++) {
1742 int channels;
1743 int csrows;
1744
1745 /* Check the number of active and not disabled channels */
1746 rc = i7core_get_active_channels(i, &channels, &csrows);
1747 if (unlikely(rc < 0))
1748 goto fail0;
1749
1750 num_channels += channels;
1751 num_csrows += csrows;
1752 }
1753
1754 /* allocate a new MC control structure */
1755 mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
1756 if (unlikely(!mci)) {
1757 rc = -ENOMEM;
1758 goto fail0;
1759 }
1760
1761 debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
1762
1763 mci->dev = &pdev->dev; /* record ptr to the generic device */
1764 pvt = mci->pvt_info;
1765 memset(pvt, 0, sizeof(*pvt));
1766 pvt->sockets = sockets;
1767 mci->mc_idx = 0;
1768
1769 /*
1770 * FIXME: how to handle RDDR3 at MCI level? It is possible to have
1771 * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
1772 * memory channels
1773 */
1774 mci->mtype_cap = MEM_FLAG_DDR3;
1775 mci->edac_ctl_cap = EDAC_FLAG_NONE;
1776 mci->edac_cap = EDAC_FLAG_NONE;
1777 mci->mod_name = "i7core_edac.c";
1778 mci->mod_ver = I7CORE_REVISION;
1779 mci->ctl_name = i7core_devs[dev_idx].ctl_name;
1780 mci->dev_name = pci_name(pdev);
1781 mci->ctl_page_to_phys = NULL;
1782 mci->mc_driver_sysfs_attributes = i7core_inj_attrs;
1783 /* Set the function pointer to an actual operation function */
1784 mci->edac_check = i7core_check_error;
1785
1786 /* Store pci devices at mci for faster access */
1787 rc = mci_bind_devs(mci);
1788 if (unlikely(rc < 0))
1789 goto fail1;
1790
1791 /* Get dimm basic config */
1792 for (i = 0; i < sockets; i++)
1793 get_dimm_config(mci, &csrow, i);
1794
1795 /* add this new MC control structure to EDAC's list of MCs */
1796 if (unlikely(edac_mc_add_mc(mci))) {
1797 debugf0("MC: " __FILE__
1798 ": %s(): failed edac_mc_add_mc()\n", __func__);
1799 /* FIXME: perhaps some code should go here that disables error
1800 * reporting if we just enabled it
1801 */
1802
1803 rc = -EINVAL;
1804 goto fail1;
1805 }
1806
1807 /* allocating generic PCI control info */
1808 i7core_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
1809 if (unlikely(!i7core_pci)) {
1810 printk(KERN_WARNING
1811 "%s(): Unable to create PCI control\n",
1812 __func__);
1813 printk(KERN_WARNING
1814 "%s(): PCI error report via EDAC not setup\n",
1815 __func__);
1816 }
1817
1818 /* Default error mask is any memory */
1819 pvt->inject.channel = 0;
1820 pvt->inject.dimm = -1;
1821 pvt->inject.rank = -1;
1822 pvt->inject.bank = -1;
1823 pvt->inject.page = -1;
1824 pvt->inject.col = -1;
1825
1826 /* Registers on edac_mce in order to receive memory errors */
1827 pvt->edac_mce.priv = mci;
1828 pvt->edac_mce.check_error = i7core_mce_check_error;
1829 spin_lock_init(&pvt->mce_lock);
1830
1831 rc = edac_mce_register(&pvt->edac_mce);
1832 if (unlikely(rc < 0)) {
1833 debugf0("MC: " __FILE__
1834 ": %s(): failed edac_mce_register()\n", __func__);
1835 goto fail1;
1836 }
1837
1838 i7core_printk(KERN_INFO, "Driver loaded.\n");
1839
1840 return 0;
1841
1842 fail1:
1843 edac_mc_free(mci);
1844
1845 fail0:
1846 i7core_put_devices();
1847 return rc;
1848 }
1849
1850 /*
1851 * i7core_remove destructor for one instance of device
1852 *
1853 */
static void __devexit i7core_remove(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct i7core_pvt *pvt;

	debugf0(__FILE__ ": %s()\n", __func__);

	/* Tear down the generic PCI error-reporting control, if created */
	if (i7core_pci)
		edac_pci_release_generic_ctl(i7core_pci);


	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	/*
	 * Unregisters on edac_mce in order to stop receiving memory errors.
	 * NOTE(review): this runs after edac_mc_del_mc(); an MCE arriving
	 * in between would still reach i7core_mce_check_error() with a
	 * deregistered mci - worth confirming whether this is a real race.
	 */
	pvt = mci->pvt_info;
	edac_mce_unregister(&pvt->edac_mce);

	/* retrieve references to resources, and free those resources */
	i7core_put_devices();

	edac_mc_free(mci);
}
1878
1879 MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);
1880
1881 /*
1882 * i7core_driver pci_driver structure for this module
1883 *
1884 */
static struct pci_driver i7core_driver = {
	.name     = "i7core_edac",		/* driver name shown by the PCI core */
	.probe    = i7core_probe,		/* called per matching device */
	.remove   = __devexit_p(i7core_remove),	/* teardown on unbind/unload */
	.id_table = i7core_pci_tbl,		/* devices this driver binds to */
};
1891
1892 /*
1893 * i7core_init Module entry function
1894 * Try to initialize this module for its devices
1895 */
1896 static int __init i7core_init(void)
1897 {
1898 int pci_rc;
1899
1900 debugf2("MC: " __FILE__ ": %s()\n", __func__);
1901
1902 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1903 opstate_init();
1904
1905 i7core_xeon_pci_fixup();
1906
1907 pci_rc = pci_register_driver(&i7core_driver);
1908
1909 if (pci_rc >= 0)
1910 return 0;
1911
1912 i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
1913 pci_rc);
1914
1915 return pci_rc;
1916 }
1917
1918 /*
1919 * i7core_exit() Module exit function
1920 * Unregister the driver
1921 */
static void __exit i7core_exit(void)
{
	debugf2("MC: " __FILE__ ": %s()\n", __func__);
	/* Unbinding the driver triggers i7core_remove() for each device */
	pci_unregister_driver(&i7core_driver);
}
1927
module_init(i7core_init);
module_exit(i7core_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
		   I7CORE_REVISION);

/* edac_op_state is provided by the EDAC core; 0444 = read-only in sysfs */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");