drivers/edac/edac_mc.c
1 /*
2 * edac_mc kernel module
3 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Thayne Harbaugh
8 * Based on work by Dan Hollis <goemon at anime dot net> and others.
9 * http://www.anime.net/~goemon/linux-ecc/
10 *
11 * Modified by Dave Peterson and Doug Thompson
12 *
13 */
14
15 #include <linux/module.h>
16 #include <linux/proc_fs.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/smp.h>
20 #include <linux/init.h>
21 #include <linux/sysctl.h>
22 #include <linux/highmem.h>
23 #include <linux/timer.h>
24 #include <linux/slab.h>
25 #include <linux/jiffies.h>
26 #include <linux/spinlock.h>
27 #include <linux/list.h>
28 #include <linux/ctype.h>
29 #include <linux/edac.h>
30 #include <linux/bitops.h>
31 #include <asm/uaccess.h>
32 #include <asm/page.h>
33 #include "edac_core.h"
34 #include "edac_module.h"
35 #include <ras/ras_event.h>
36
37 #ifdef CONFIG_EDAC_ATOMIC_SCRUB
38 #include <asm/edac.h>
39 #else
40 #define edac_atomic_scrub(va, size) do { } while (0)
41 #endif
42
43 /* lock to memory controller's control array */
44 static DEFINE_MUTEX(mem_ctls_mutex);
45 static LIST_HEAD(mc_devices);
46
47 /*
48 * Used to lock EDAC MC to just one module, preventing two drivers (e.g.
49 * apei/ghes and i7core_edac) from being used at the same time.
50 */
51 static void const *edac_mc_owner;
52
53 static struct bus_type mc_bus[EDAC_MAX_MCS];
54
55 unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
56 unsigned len)
57 {
58 struct mem_ctl_info *mci = dimm->mci;
59 int i, n, count = 0;
60 char *p = buf;
61
62 for (i = 0; i < mci->n_layers; i++) {
63 n = snprintf(p, len, "%s %d ",
64 edac_layer_name[mci->layers[i].type],
65 dimm->location[i]);
66 p += n;
67 len -= n;
68 count += n;
69 if (!len)
70 break;
71 }
72
73 return count;
74 }
75
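/*
 * Editor's note (illustrative, not from the original source): for a
 * hypothetical two-layer controller whose layers are "channel" and
 * "slot", a DIMM at channel 1, slot 0 would be rendered by the loop
 * above as "channel 1 slot 0 " - one "<layer-name> <position> " pair
 * per layer.
 */
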
76 #ifdef CONFIG_EDAC_DEBUG
77
78 static void edac_mc_dump_channel(struct rank_info *chan)
79 {
80 edac_dbg(4, " channel->chan_idx = %d\n", chan->chan_idx);
81 edac_dbg(4, " channel = %p\n", chan);
82 edac_dbg(4, " channel->csrow = %p\n", chan->csrow);
83 edac_dbg(4, " channel->dimm = %p\n", chan->dimm);
84 }
85
86 static void edac_mc_dump_dimm(struct dimm_info *dimm, int number)
87 {
88 char location[80];
89
90 edac_dimm_info_location(dimm, location, sizeof(location));
91
92 edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
93 dimm->mci->csbased ? "rank" : "dimm",
94 number, location, dimm->csrow, dimm->cschannel);
95 edac_dbg(4, " dimm = %p\n", dimm);
96 edac_dbg(4, " dimm->label = '%s'\n", dimm->label);
97 edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
98 edac_dbg(4, " dimm->grain = %d\n", dimm->grain);
100 }
101
102 static void edac_mc_dump_csrow(struct csrow_info *csrow)
103 {
104 edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
105 edac_dbg(4, " csrow = %p\n", csrow);
106 edac_dbg(4, " csrow->first_page = 0x%lx\n", csrow->first_page);
107 edac_dbg(4, " csrow->last_page = 0x%lx\n", csrow->last_page);
108 edac_dbg(4, " csrow->page_mask = 0x%lx\n", csrow->page_mask);
109 edac_dbg(4, " csrow->nr_channels = %d\n", csrow->nr_channels);
110 edac_dbg(4, " csrow->channels = %p\n", csrow->channels);
111 edac_dbg(4, " csrow->mci = %p\n", csrow->mci);
112 }
113
114 static void edac_mc_dump_mci(struct mem_ctl_info *mci)
115 {
116 edac_dbg(3, "\tmci = %p\n", mci);
117 edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
118 edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
119 edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
120 edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
121 edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
122 mci->nr_csrows, mci->csrows);
123 edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
124 mci->tot_dimms, mci->dimms);
125 edac_dbg(3, "\tdev = %p\n", mci->pdev);
126 edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
127 mci->mod_name, mci->ctl_name);
128 edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
129 }
130
131 #endif /* CONFIG_EDAC_DEBUG */
132
133 const char * const edac_mem_types[] = {
134 [MEM_EMPTY] = "Empty csrow",
135 [MEM_RESERVED] = "Reserved csrow type",
136 [MEM_UNKNOWN] = "Unknown csrow type",
137 [MEM_FPM] = "Fast page mode RAM",
138 [MEM_EDO] = "Extended data out RAM",
139 [MEM_BEDO] = "Burst Extended data out RAM",
140 [MEM_SDR] = "Single data rate SDRAM",
141 [MEM_RDR] = "Registered single data rate SDRAM",
142 [MEM_DDR] = "Double data rate SDRAM",
143 [MEM_RDDR] = "Registered Double data rate SDRAM",
144 [MEM_RMBS] = "Rambus DRAM",
145 [MEM_DDR2] = "Unbuffered DDR2 RAM",
146 [MEM_FB_DDR2] = "Fully buffered DDR2",
147 [MEM_RDDR2] = "Registered DDR2 RAM",
148 [MEM_XDR] = "Rambus XDR",
149 [MEM_DDR3] = "Unbuffered DDR3 RAM",
150 [MEM_RDDR3] = "Registered DDR3 RAM",
151 [MEM_LRDDR3] = "Load-Reduced DDR3 RAM",
152 [MEM_DDR4] = "Unbuffered DDR4 RAM",
153 [MEM_RDDR4] = "Registered DDR4 RAM",
154 };
155 EXPORT_SYMBOL_GPL(edac_mem_types);
156
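/*
 * Editor's example (illustrative): a driver can use this table to log a
 * human-readable memory type, e.g.:
 *
 *	edac_printk(KERN_INFO, EDAC_MC, "memory type: %s\n",
 *		    edac_mem_types[MEM_DDR3]);
 */
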
157 /**
158 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
159 * @p: pointer to a pointer with the memory offset to be used. At
160 * return, this will be incremented to point to the next offset
161 * @size: Size of the data structure to be reserved
162 * @n_elems: Number of elements that should be reserved
163 *
164 * If 'size' is a constant, the compiler will optimize this whole function
165 * down to either a no-op or the addition of a constant to the value of '*p'.
166 *
167 * The 'p' pointer is needed so that successive calls keep advancing to
168 * the proper offsets in memory when a struct is allocated along with its
169 * embedded structs, as edac_device_alloc_ctl_info() does, for example.
171 *
172 * At return, the pointer 'p' will be incremented to be used on a next call
173 * to this function.
174 */
175 void *edac_align_ptr(void **p, unsigned size, int n_elems)
176 {
177 unsigned align, r;
178 void *ptr = *p;
179
180 *p += size * n_elems;
181
182 /*
183 * 'ptr' can possibly be the unaligned offset of an item X such that
184 * sizeof(X) is 'size'. Adjust it so that its alignment is at least as
185 * stringent as what the compiler would provide for X and return
186 * the aligned result.
187 * Here we assume that the alignment of a "long long" is the most
188 * stringent alignment that the compiler will ever provide by default.
189 * As far as I know, this is a reasonable assumption.
190 */
191 if (size > sizeof(long))
192 align = sizeof(long long);
193 else if (size > sizeof(int))
194 align = sizeof(long);
195 else if (size > sizeof(short))
196 align = sizeof(int);
197 else if (size > sizeof(char))
198 align = sizeof(short);
199 else
200 return (char *)ptr;
201
202 r = (unsigned long)ptr % align;
203
204 if (r == 0)
205 return (char *)ptr;
206
207 *p += align - r;
208
209 return (void *)(((unsigned long)ptr) + align - r);
210 }
211
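/*
 * Editor's usage sketch (illustrative; 'struct foo', 'n' and the local
 * variable names are made up): callers run edac_align_ptr() over a NULL
 * cursor to record offsets and the total size, allocate once, then
 * rebase the recorded offsets onto the real allocation, exactly as
 * edac_mc_alloc() does below:
 *
 *	void *ptr = NULL, *base;
 *	struct foo *off, *f;
 *	unsigned long size;
 *
 *	off  = edac_align_ptr(&ptr, sizeof(struct foo), n); // offset pass
 *	size = (unsigned long)ptr;	// total bytes for one allocation
 *	base = kzalloc(size, GFP_KERNEL);
 *	f    = (struct foo *)((char *)base + (unsigned long)off); // rebase
 */
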
212 static void _edac_mc_free(struct mem_ctl_info *mci)
213 {
214 int i, chn, row;
215 struct csrow_info *csr;
216 const unsigned int tot_dimms = mci->tot_dimms;
217 const unsigned int tot_channels = mci->num_cschannel;
218 const unsigned int tot_csrows = mci->nr_csrows;
219
220 if (mci->dimms) {
221 for (i = 0; i < tot_dimms; i++)
222 kfree(mci->dimms[i]);
223 kfree(mci->dimms);
224 }
225 if (mci->csrows) {
226 for (row = 0; row < tot_csrows; row++) {
227 csr = mci->csrows[row];
228 if (csr) {
229 if (csr->channels) {
230 for (chn = 0; chn < tot_channels; chn++)
231 kfree(csr->channels[chn]);
232 kfree(csr->channels);
233 }
234 kfree(csr);
235 }
236 }
237 kfree(mci->csrows);
238 }
239 kfree(mci);
240 }
241
242 /**
243 * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
244 * @mc_num: Memory controller number
245 * @n_layers: Number of MC hierarchy layers
246 * @layers: Describes each layer as seen by the Memory Controller
247 * @sz_pvt: size of private storage needed
248 *
249 *
250 * Everything is kmalloc'ed as one big chunk - more efficient.
251 * Can only be used if all structures have the same lifetime - otherwise
252 * you have to allocate and initialize your own structures.
253 *
254 * Use edac_mc_free() to free mc structures allocated by this function.
255 *
256 * NOTE: drivers handle multi-rank memories in different ways: in some
257 * drivers, one multi-rank memory stick is mapped as one entry, while, in
258 * others, a single multi-rank memory stick would be mapped into several
259 * entries. Currently, this function will allocate multiple struct dimm_info
260 * in such scenarios, as grouping the multiple ranks requires driver changes.
261 *
262 * Returns:
263 * On failure: NULL
264 * On success: struct mem_ctl_info pointer
265 */
266 struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
267 unsigned n_layers,
268 struct edac_mc_layer *layers,
269 unsigned sz_pvt)
270 {
271 struct mem_ctl_info *mci;
272 struct edac_mc_layer *layer;
273 struct csrow_info *csr;
274 struct rank_info *chan;
275 struct dimm_info *dimm;
276 u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
277 unsigned pos[EDAC_MAX_LAYERS];
278 unsigned size, tot_dimms = 1, count = 1;
279 unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
280 void *pvt, *p, *ptr = NULL;
281 int i, j, row, chn, n, len, off;
282 bool per_rank = false;
283
284 BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
285 /*
286 * Calculate the total amount of dimms and csrows/cschannels while
287 * in the old API emulation mode
288 */
289 for (i = 0; i < n_layers; i++) {
290 tot_dimms *= layers[i].size;
291 if (layers[i].is_virt_csrow)
292 tot_csrows *= layers[i].size;
293 else
294 tot_channels *= layers[i].size;
295
296 if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
297 per_rank = true;
298 }
299
300 /* Figure out the offsets of the various items from the start of an mc
301 * structure. We want the alignment of each item to be at least as
302 * stringent as what the compiler would provide if we could simply
303 * hardcode everything into a single struct.
304 */
305 mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
306 layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
307 for (i = 0; i < n_layers; i++) {
308 count *= layers[i].size;
309 edac_dbg(4, "errcount layer %d size %d\n", i, count);
310 ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
311 ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
312 tot_errcount += 2 * count;
313 }
314
315 edac_dbg(4, "allocating %d error counters\n", tot_errcount);
316 pvt = edac_align_ptr(&ptr, sz_pvt, 1);
317 size = ((unsigned long)pvt) + sz_pvt;
318
319 edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
320 size,
321 tot_dimms,
322 per_rank ? "ranks" : "dimms",
323 tot_csrows * tot_channels);
324
325 mci = kzalloc(size, GFP_KERNEL);
326 if (mci == NULL)
327 return NULL;
328
329 /* Adjust pointers so they point within the memory we just allocated
330 * rather than an imaginary chunk of memory located at address 0.
331 */
332 layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
333 for (i = 0; i < n_layers; i++) {
334 mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
335 mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
336 }
337 pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
338
339 /* setup index and various internal pointers */
340 mci->mc_idx = mc_num;
341 mci->tot_dimms = tot_dimms;
342 mci->pvt_info = pvt;
343 mci->n_layers = n_layers;
344 mci->layers = layer;
345 memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
346 mci->nr_csrows = tot_csrows;
347 mci->num_cschannel = tot_channels;
348 mci->csbased = per_rank;
349
350 /*
351 * Allocate and fill the csrow/channels structs
352 */
353 mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
354 if (!mci->csrows)
355 goto error;
356 for (row = 0; row < tot_csrows; row++) {
357 csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
358 if (!csr)
359 goto error;
360 mci->csrows[row] = csr;
361 csr->csrow_idx = row;
362 csr->mci = mci;
363 csr->nr_channels = tot_channels;
364 csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
365 GFP_KERNEL);
366 if (!csr->channels)
367 goto error;
368
369 for (chn = 0; chn < tot_channels; chn++) {
370 chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
371 if (!chan)
372 goto error;
373 csr->channels[chn] = chan;
374 chan->chan_idx = chn;
375 chan->csrow = csr;
376 }
377 }
378
379 /*
380 * Allocate and fill the dimm structs
381 */
382 mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
383 if (!mci->dimms)
384 goto error;
385
386 memset(&pos, 0, sizeof(pos));
387 row = 0;
388 chn = 0;
389 for (i = 0; i < tot_dimms; i++) {
390 chan = mci->csrows[row]->channels[chn];
391 off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]);
392 if (off < 0 || off >= tot_dimms) {
393 edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n");
394 goto error;
395 }
396
397 dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
398 if (!dimm)
399 goto error;
400 mci->dimms[off] = dimm;
401 dimm->mci = mci;
402
403 /*
404 * Copy DIMM location and initialize it.
405 */
406 len = sizeof(dimm->label);
407 p = dimm->label;
408 n = snprintf(p, len, "mc#%u", mc_num);
409 p += n;
410 len -= n;
411 for (j = 0; j < n_layers; j++) {
412 n = snprintf(p, len, "%s#%u",
413 edac_layer_name[layers[j].type],
414 pos[j]);
415 p += n;
416 len -= n;
417 dimm->location[j] = pos[j];
418
419 if (len <= 0)
420 break;
421 }
422
423 /* Link it to the csrows old API data */
424 chan->dimm = dimm;
425 dimm->csrow = row;
426 dimm->cschannel = chn;
427
428 /* Increment csrow location */
429 if (layers[0].is_virt_csrow) {
430 chn++;
431 if (chn == tot_channels) {
432 chn = 0;
433 row++;
434 }
435 } else {
436 row++;
437 if (row == tot_csrows) {
438 row = 0;
439 chn++;
440 }
441 }
442
443 /* Increment dimm location */
444 for (j = n_layers - 1; j >= 0; j--) {
445 pos[j]++;
446 if (pos[j] < layers[j].size)
447 break;
448 pos[j] = 0;
449 }
450 }
451
452 mci->op_state = OP_ALLOC;
453
454 return mci;
455
456 error:
457 _edac_mc_free(mci);
458
459 return NULL;
460 }
461 EXPORT_SYMBOL_GPL(edac_mc_alloc);
462
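/*
 * Editor's usage sketch (hypothetical driver; names and sizes are made
 * up): a controller with 4 chip-select rows and 2 channels per row
 * could be allocated as:
 *
 *	struct edac_mc_layer layers[2];
 *	struct mem_ctl_info *mci;
 *
 *	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
 *	layers[0].size = 4;
 *	layers[0].is_virt_csrow = true;
 *	layers[1].type = EDAC_MC_LAYER_CHANNEL;
 *	layers[1].size = 2;
 *	layers[1].is_virt_csrow = false;
 *
 *	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
 *			    sizeof(struct my_pvt));	// my_pvt is made up
 *	if (!mci)
 *		return -ENOMEM;
 */
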
463 /**
464 * edac_mc_free
465 * 'Free' a previously allocated 'mci' structure
466 * @mci: pointer to a struct mem_ctl_info structure
467 */
468 void edac_mc_free(struct mem_ctl_info *mci)
469 {
470 edac_dbg(1, "\n");
471
472 /* If we're not yet registered with sysfs, free only what was allocated
473 * in edac_mc_alloc().
474 */
475 if (!device_is_registered(&mci->dev)) {
476 _edac_mc_free(mci);
477 return;
478 }
479
480 /* the mci instance is freed here, when the sysfs object is dropped */
481 edac_unregister_sysfs(mci);
482 }
483 EXPORT_SYMBOL_GPL(edac_mc_free);
484
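/*
 * Editor's note (illustrative): a typical probe error path calls this on
 * a not-yet-registered mci, which takes the _edac_mc_free() branch above:
 *
 *	mci = edac_mc_alloc(...);
 *	if (!mci)
 *		return -ENOMEM;
 *	...
 *	if (edac_mc_add_mc_with_groups(mci, NULL)) {
 *		edac_mc_free(mci);	// not in sysfs yet: plain kfree path
 *		return -ENODEV;
 *	}
 */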
485
486 /**
487 * find_mci_by_dev
488 *
489 * scan list of controllers looking for the one that manages
490 * the 'dev' device
491 * @dev: pointer to a struct device related with the MCI
492 */
493 struct mem_ctl_info *find_mci_by_dev(struct device *dev)
494 {
495 struct mem_ctl_info *mci;
496 struct list_head *item;
497
498 edac_dbg(3, "\n");
499
500 list_for_each(item, &mc_devices) {
501 mci = list_entry(item, struct mem_ctl_info, link);
502
503 if (mci->pdev == dev)
504 return mci;
505 }
506
507 return NULL;
508 }
509 EXPORT_SYMBOL_GPL(find_mci_by_dev);
510
511 /*
512 * handler for EDAC to check if an NMI-type handler has asserted an interrupt
513 */
514 static int edac_mc_assert_error_check_and_clear(void)
515 {
516 int old_state;
517
518 if (edac_op_state == EDAC_OPSTATE_POLL)
519 return 1;
520
521 old_state = edac_err_assert;
522 edac_err_assert = 0;
523
524 return old_state;
525 }
526
527 /*
528 * edac_mc_workq_function
529 * performs the operation scheduled by a workq request
530 */
531 static void edac_mc_workq_function(struct work_struct *work_req)
532 {
533 struct delayed_work *d_work = to_delayed_work(work_req);
534 struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);
535
536 mutex_lock(&mem_ctls_mutex);
537
538 /* if this control struct has moved to offline state, we are done */
539 if (mci->op_state == OP_OFFLINE) {
540 mutex_unlock(&mem_ctls_mutex);
541 return;
542 }
543
544 /* Only poll controllers that are running polled and have a check */
545 if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
546 mci->edac_check(mci);
547
548 mutex_unlock(&mem_ctls_mutex);
549
550 /* Reschedule */
551 edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
552 }
553
554 /*
555 * edac_mc_workq_setup
556 * initialize a workq item for this mci
557 * passing in the new delay period in msec
558 *
559 * locking model:
560 *
561 * called with the mem_ctls_mutex held
562 */
563 static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
564 {
565 edac_dbg(0, "\n");
566
567 /* if this instance is not in the POLL state, then simply return */
568 if (mci->op_state != OP_RUNNING_POLL)
569 return;
570
571 INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
572
573 edac_queue_work(&mci->work, msecs_to_jiffies(msec));
574 }
575
576 /*
577 * edac_mc_workq_teardown
578 * stop the workq processing on this mci
579 *
580 * locking model:
581 *
582 * called WITHOUT lock held
583 */
584 static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
585 {
586 mci->op_state = OP_OFFLINE;
587
588 edac_stop_work(&mci->work);
589 }
590
591 /*
592 * edac_mc_reset_delay_period(unsigned long value)
593 *
594 * user space has updated our poll period value, need to
595 * reset our workq delays
596 */
597 void edac_mc_reset_delay_period(unsigned long value)
598 {
599 struct mem_ctl_info *mci;
600 struct list_head *item;
601
602 mutex_lock(&mem_ctls_mutex);
603
604 list_for_each(item, &mc_devices) {
605 mci = list_entry(item, struct mem_ctl_info, link);
606
607 edac_mod_work(&mci->work, value);
608 }
609 mutex_unlock(&mem_ctls_mutex);
610 }
611
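/*
 * Editor's note (illustrative): this runs when user space updates the
 * edac_mc_poll_msec module parameter; every registered controller's
 * delayed work is re-armed with the new interval via edac_mod_work()
 * above.
 */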
612
613
614 /* Return 0 on success, 1 on failure.
615 * Before calling this function, caller must
616 * assign a unique value to mci->mc_idx.
617 *
618 * locking model:
619 *
620 * called with the mem_ctls_mutex lock held
621 */
622 static int add_mc_to_global_list(struct mem_ctl_info *mci)
623 {
624 struct list_head *item, *insert_before;
625 struct mem_ctl_info *p;
626
627 insert_before = &mc_devices;
628
629 p = find_mci_by_dev(mci->pdev);
630 if (unlikely(p != NULL))
631 goto fail0;
632
633 list_for_each(item, &mc_devices) {
634 p = list_entry(item, struct mem_ctl_info, link);
635
636 if (p->mc_idx >= mci->mc_idx) {
637 if (unlikely(p->mc_idx == mci->mc_idx))
638 goto fail1;
639
640 insert_before = item;
641 break;
642 }
643 }
644
645 list_add_tail_rcu(&mci->link, insert_before);
646 atomic_inc(&edac_handlers);
647 return 0;
648
649 fail0:
650 edac_printk(KERN_WARNING, EDAC_MC,
651 "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
652 edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
653 return 1;
654
655 fail1:
656 edac_printk(KERN_WARNING, EDAC_MC,
657 "bug in low-level driver: attempt to assign\n"
658 " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
659 return 1;
660 }
661
662 static int del_mc_from_global_list(struct mem_ctl_info *mci)
663 {
664 int handlers = atomic_dec_return(&edac_handlers);
665 list_del_rcu(&mci->link);
666
667 /* these are for safe removal of devices from global list while
668 * NMI handlers may be traversing list
669 */
670 synchronize_rcu();
671 INIT_LIST_HEAD(&mci->link);
672
673 return handlers;
674 }
675
676 /**
677 * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
678 *
679 * If found, return a pointer to the structure.
680 * Else return NULL.
681 *
682 * Caller must hold mem_ctls_mutex.
683 */
684 struct mem_ctl_info *edac_mc_find(int idx)
685 {
686 struct list_head *item;
687 struct mem_ctl_info *mci;
688
689 list_for_each(item, &mc_devices) {
690 mci = list_entry(item, struct mem_ctl_info, link);
691
692 if (mci->mc_idx >= idx) {
693 if (mci->mc_idx == idx)
694 return mci;
695
696 break;
697 }
698 }
699
700 return NULL;
701 }
702 EXPORT_SYMBOL(edac_mc_find);
703
704 /**
705 * edac_mc_add_mc_with_groups: Insert the 'mci' structure into the mci
706 * global list and create sysfs entries associated with mci structure
707 * @mci: pointer to the mci structure to be added to the list
708 * @groups: optional attribute groups for the driver-specific sysfs entries
709 *
710 * Return:
711 * 0 Success
712 * !0 Failure
713 */
714
715 /* FIXME - should a warning be printed if no error detection? correction? */
716 int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
717 const struct attribute_group **groups)
718 {
719 int ret = -EINVAL;
720 edac_dbg(0, "\n");
721
722 if (mci->mc_idx >= EDAC_MAX_MCS) {
723 pr_warn_once("Too many memory controllers: %d\n", mci->mc_idx);
724 return -ENODEV;
725 }
726
727 #ifdef CONFIG_EDAC_DEBUG
728 if (edac_debug_level >= 3)
729 edac_mc_dump_mci(mci);
730
731 if (edac_debug_level >= 4) {
732 int i;
733
734 for (i = 0; i < mci->nr_csrows; i++) {
735 struct csrow_info *csrow = mci->csrows[i];
736 u32 nr_pages = 0;
737 int j;
738
739 for (j = 0; j < csrow->nr_channels; j++)
740 nr_pages += csrow->channels[j]->dimm->nr_pages;
741 if (!nr_pages)
742 continue;
743 edac_mc_dump_csrow(csrow);
744 for (j = 0; j < csrow->nr_channels; j++)
745 if (csrow->channels[j]->dimm->nr_pages)
746 edac_mc_dump_channel(csrow->channels[j]);
747 }
748 for (i = 0; i < mci->tot_dimms; i++)
749 if (mci->dimms[i]->nr_pages)
750 edac_mc_dump_dimm(mci->dimms[i], i);
751 }
752 #endif
753 mutex_lock(&mem_ctls_mutex);
754
755 if (edac_mc_owner && edac_mc_owner != mci->mod_name) {
756 ret = -EPERM;
757 goto fail0;
758 }
759
760 if (add_mc_to_global_list(mci))
761 goto fail0;
762
763 /* set load time so that error rate can be tracked */
764 mci->start_time = jiffies;
765
766 mci->bus = &mc_bus[mci->mc_idx];
767
768 if (edac_create_sysfs_mci_device(mci, groups)) {
769 edac_mc_printk(mci, KERN_WARNING,
770 "failed to create sysfs device\n");
771 goto fail1;
772 }
773
774 /* If there IS a check routine, then we are running POLLED */
775 if (mci->edac_check != NULL) {
776 /* This instance is NOW RUNNING */
777 mci->op_state = OP_RUNNING_POLL;
778
779 edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
780 } else {
781 mci->op_state = OP_RUNNING_INTERRUPT;
782 }
783
784 /* Report action taken */
785 edac_mc_printk(mci, KERN_INFO,
786 "Giving out device to module %s controller %s: DEV %s (%s)\n",
787 mci->mod_name, mci->ctl_name, mci->dev_name,
788 edac_op_state_to_string(mci->op_state));
789
790 edac_mc_owner = mci->mod_name;
791
792 mutex_unlock(&mem_ctls_mutex);
793 return 0;
794
795 fail1:
796 del_mc_from_global_list(mci);
797
798 fail0:
799 mutex_unlock(&mem_ctls_mutex);
800 return ret;
801 }
802 EXPORT_SYMBOL_GPL(edac_mc_add_mc_with_groups);
803
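/*
 * Editor's usage sketch (hypothetical probe path; the names are made
 * up): a driver normally fills the identification fields before
 * registering:
 *
 *	mci->pdev = &pdev->dev;
 *	mci->mod_name = "my_edac";
 *	mci->ctl_name = "my_ctl";
 *	mci->edac_check = my_check;	// or NULL for interrupt-driven MCs
 *	if (edac_mc_add_mc_with_groups(mci, NULL))
 *		goto err_free;
 */
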
804 /**
805 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
806 * remove mci structure from global list
807 * @dev: Pointer to 'struct device' representing the mci structure to remove.
808 *
809 * Return pointer to removed mci structure, or NULL if device not found.
810 */
811 struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
812 {
813 struct mem_ctl_info *mci;
814
815 edac_dbg(0, "\n");
816
817 mutex_lock(&mem_ctls_mutex);
818
819 /* find the requested mci struct in the global list */
820 mci = find_mci_by_dev(dev);
821 if (mci == NULL) {
822 mutex_unlock(&mem_ctls_mutex);
823 return NULL;
824 }
825
826 if (!del_mc_from_global_list(mci))
827 edac_mc_owner = NULL;
828 mutex_unlock(&mem_ctls_mutex);
829
830 /* flush workq processes */
831 edac_mc_workq_teardown(mci);
832
833 /* marking MCI offline */
834 mci->op_state = OP_OFFLINE;
835
836 /* remove from sysfs */
837 edac_remove_sysfs_mci_device(mci);
838
839 edac_printk(KERN_INFO, EDAC_MC,
840 "Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
841 mci->mod_name, mci->ctl_name, edac_dev_name(mci));
842
843 return mci;
844 }
845 EXPORT_SYMBOL_GPL(edac_mc_del_mc);
846
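/*
 * Editor's usage sketch (illustrative): the canonical remove path pairs
 * this with edac_mc_free():
 *
 *	mci = edac_mc_del_mc(&pdev->dev);
 *	if (mci)
 *		edac_mc_free(mci);
 */
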
847 static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
848 u32 size)
849 {
850 struct page *pg;
851 void *virt_addr;
852 unsigned long flags = 0;
853
854 edac_dbg(3, "\n");
855
856 /* ECC error page was not in our memory. Ignore it. */
857 if (!pfn_valid(page))
858 return;
859
860 /* Find the actual page structure then map it and fix */
861 pg = pfn_to_page(page);
862
863 if (PageHighMem(pg))
864 local_irq_save(flags);
865
866 virt_addr = kmap_atomic(pg);
867
868 /* Perform architecture specific atomic scrub operation */
869 edac_atomic_scrub(virt_addr + offset, size);
870
871 /* Unmap and complete */
872 kunmap_atomic(virt_addr);
873
874 if (PageHighMem(pg))
875 local_irq_restore(flags);
876 }
877
878 /* FIXME - should return -1 */
879 int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
880 {
881 struct csrow_info **csrows = mci->csrows;
882 int row, i, j, n;
883
884 edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
885 row = -1;
886
887 for (i = 0; i < mci->nr_csrows; i++) {
888 struct csrow_info *csrow = csrows[i];
889 n = 0;
890 for (j = 0; j < csrow->nr_channels; j++) {
891 struct dimm_info *dimm = csrow->channels[j]->dimm;
892 n += dimm->nr_pages;
893 }
894 if (n == 0)
895 continue;
896
897 edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
898 mci->mc_idx,
899 csrow->first_page, page, csrow->last_page,
900 csrow->page_mask);
901
902 if ((page >= csrow->first_page) &&
903 (page <= csrow->last_page) &&
904 ((page & csrow->page_mask) ==
905 (csrow->first_page & csrow->page_mask))) {
906 row = i;
907 break;
908 }
909 }
910
911 if (row == -1)
912 edac_mc_printk(mci, KERN_ERR,
913 "could not look up page error address %lx\n",
914 (unsigned long)page);
915
916 return row;
917 }
918 EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
919
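/*
 * Editor's worked example (illustrative numbers): for a csrow with
 * first_page = 0x100, last_page = 0x1ff and page_mask = 0xf00, an error
 * at page 0x1a3 matches, since 0x100 <= 0x1a3 <= 0x1ff and
 * (0x1a3 & 0xf00) == (0x100 & 0xf00).
 */
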
920 const char *edac_layer_name[] = {
921 [EDAC_MC_LAYER_BRANCH] = "branch",
922 [EDAC_MC_LAYER_CHANNEL] = "channel",
923 [EDAC_MC_LAYER_SLOT] = "slot",
924 [EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
925 [EDAC_MC_LAYER_ALL_MEM] = "memory",
926 };
927 EXPORT_SYMBOL_GPL(edac_layer_name);
928
929 static void edac_inc_ce_error(struct mem_ctl_info *mci,
930 bool enable_per_layer_report,
931 const int pos[EDAC_MAX_LAYERS],
932 const u16 count)
933 {
934 int i, index = 0;
935
936 mci->ce_mc += count;
937
938 if (!enable_per_layer_report) {
939 mci->ce_noinfo_count += count;
940 return;
941 }
942
943 for (i = 0; i < mci->n_layers; i++) {
944 if (pos[i] < 0)
945 break;
946 index += pos[i];
947 mci->ce_per_layer[i][index] += count;
948
949 if (i < mci->n_layers - 1)
950 index *= mci->layers[i + 1].size;
951 }
952 }
953
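/*
 * Editor's worked example (illustrative): the loop below flattens the
 * per-layer positions into a row-major index at each depth. With three
 * layers of sizes {4, 2, 8} and pos = {3, 1, 5}, the counters bumped are
 *
 *	ce_per_layer[0][3], ce_per_layer[1][3 * 2 + 1],
 *	ce_per_layer[2][(3 * 2 + 1) * 8 + 5]
 *
 * and edac_inc_ue_error() below uses the same indexing.
 */
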
954 static void edac_inc_ue_error(struct mem_ctl_info *mci,
955 bool enable_per_layer_report,
956 const int pos[EDAC_MAX_LAYERS],
957 const u16 count)
958 {
959 int i, index = 0;
960
961 mci->ue_mc += count;
962
963 if (!enable_per_layer_report) {
964 mci->ue_noinfo_count += count;
965 return;
966 }
967
968 for (i = 0; i < mci->n_layers; i++) {
969 if (pos[i] < 0)
970 break;
971 index += pos[i];
972 mci->ue_per_layer[i][index] += count;
973
974 if (i < mci->n_layers - 1)
975 index *= mci->layers[i + 1].size;
976 }
977 }
978
979 static void edac_ce_error(struct mem_ctl_info *mci,
980 const u16 error_count,
981 const int pos[EDAC_MAX_LAYERS],
982 const char *msg,
983 const char *location,
984 const char *label,
985 const char *detail,
986 const char *other_detail,
987 const bool enable_per_layer_report,
988 const unsigned long page_frame_number,
989 const unsigned long offset_in_page,
990 long grain)
991 {
992 unsigned long remapped_page;
993 char *msg_aux = "";
994
995 if (*msg)
996 msg_aux = " ";
997
998 if (edac_mc_get_log_ce()) {
999 if (other_detail && *other_detail)
1000 edac_mc_printk(mci, KERN_WARNING,
1001 "%d CE %s%son %s (%s %s - %s)\n",
1002 error_count, msg, msg_aux, label,
1003 location, detail, other_detail);
1004 else
1005 edac_mc_printk(mci, KERN_WARNING,
1006 "%d CE %s%son %s (%s %s)\n",
1007 error_count, msg, msg_aux, label,
1008 location, detail);
1009 }
1010 edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count);
1011
1012 if (mci->scrub_mode == SCRUB_SW_SRC) {
1013 /*
1014 * Some memory controllers (called MCs below) can remap
1015 * memory so that it is still available at a different
1016 * address when PCI devices map into memory.
1017 * MCs that can't do this lose the memory where PCI
1018 * devices are mapped. This mapping is MC-dependent
1019 * and so we call back into the MC driver for it to
1020 * map the MC page to a physical (CPU) page which can
1021 * then be mapped to a virtual page - which can then
1022 * be scrubbed.
1023 */
1024 remapped_page = mci->ctl_page_to_phys ?
1025 mci->ctl_page_to_phys(mci, page_frame_number) :
1026 page_frame_number;
1027
1028 edac_mc_scrub_block(remapped_page,
1029 offset_in_page, grain);
1030 }
1031 }
1032
1033 static void edac_ue_error(struct mem_ctl_info *mci,
1034 const u16 error_count,
1035 const int pos[EDAC_MAX_LAYERS],
1036 const char *msg,
1037 const char *location,
1038 const char *label,
1039 const char *detail,
1040 const char *other_detail,
1041 const bool enable_per_layer_report)
1042 {
1043 char *msg_aux = "";
1044
1045 if (*msg)
1046 msg_aux = " ";
1047
1048 if (edac_mc_get_log_ue()) {
1049 if (other_detail && *other_detail)
1050 edac_mc_printk(mci, KERN_WARNING,
1051 "%d UE %s%son %s (%s %s - %s)\n",
1052 error_count, msg, msg_aux, label,
1053 location, detail, other_detail);
1054 else
1055 edac_mc_printk(mci, KERN_WARNING,
1056 "%d UE %s%son %s (%s %s)\n",
1057 error_count, msg, msg_aux, label,
1058 location, detail);
1059 }
1060
1061 if (edac_mc_get_panic_on_ue()) {
1062 if (other_detail && *other_detail)
1063 panic("UE %s%son %s (%s%s - %s)\n",
1064 msg, msg_aux, label, location, detail, other_detail);
1065 else
1066 panic("UE %s%son %s (%s%s)\n",
1067 msg, msg_aux, label, location, detail);
1068 }
1069
1070 edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
1071 }
1072
1073 /**
1074 * edac_raw_mc_handle_error - reports a memory event to userspace without doing
1075 * anything to discover the error location
1076 *
1077 * @type: severity of the error (CE/UE/Fatal)
1078 * @mci: a struct mem_ctl_info pointer
1079 * @e: error description
1080 *
1081 * This raw function is used internally by edac_mc_handle_error(). It should
1082 * only be called directly when the hardware error comes directly from the
1083 * BIOS, as in the case of the APEI GHES driver.
1084 */
1085 void edac_raw_mc_handle_error(const enum hw_event_mc_err_type type,
1086 struct mem_ctl_info *mci,
1087 struct edac_raw_error_desc *e)
1088 {
1089 char detail[80];
1090 int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
1091
1092 /* Memory type dependent details about the error */
1093 if (type == HW_EVENT_ERR_CORRECTED) {
1094 snprintf(detail, sizeof(detail),
1095 "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
1096 e->page_frame_number, e->offset_in_page,
1097 e->grain, e->syndrome);
1098 edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label,
1099 detail, e->other_detail, e->enable_per_layer_report,
1100 e->page_frame_number, e->offset_in_page, e->grain);
1101 } else {
1102 snprintf(detail, sizeof(detail),
1103 "page:0x%lx offset:0x%lx grain:%ld",
1104 e->page_frame_number, e->offset_in_page, e->grain);
1105
1106 edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label,
1107 detail, e->other_detail, e->enable_per_layer_report);
1108 }
1109
1110
1111 }
1112 EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);
1113
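/*
 * Editor's note (illustrative): the APEI GHES driver is the expected
 * direct caller here - it fills a struct edac_raw_error_desc from the
 * firmware record and skips the layer/DIMM lookup that
 * edac_mc_handle_error() below performs before delegating to this
 * function.
 */
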
1114 /**
1115 * edac_mc_handle_error - reports a memory event to userspace
1116 *
1117 * @type: severity of the error (CE/UE/Fatal)
1118 * @mci: a struct mem_ctl_info pointer
1119 * @error_count: Number of errors of the same type
1120 * @page_frame_number: memory page where the error occurred
1121 * @offset_in_page: offset of the error inside the page
1122 * @syndrome: ECC syndrome
1123 * @top_layer: Memory layer[0] position
1124 * @mid_layer: Memory layer[1] position
1125 * @low_layer: Memory layer[2] position
1126 * @msg: Message meaningful to the end users that
1127 * explains the event
1128 * @other_detail: Technical details about the event that
1129 * may help hardware manufacturers and
1130 * EDAC developers to analyse the event
1131 */
1132 void edac_mc_handle_error(const enum hw_event_mc_err_type type,
1133 struct mem_ctl_info *mci,
1134 const u16 error_count,
1135 const unsigned long page_frame_number,
1136 const unsigned long offset_in_page,
1137 const unsigned long syndrome,
1138 const int top_layer,
1139 const int mid_layer,
1140 const int low_layer,
1141 const char *msg,
1142 const char *other_detail)
1143 {
1144 char *p;
1145 int row = -1, chan = -1;
1146 int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
1147 int i, n_labels = 0;
1148 u8 grain_bits;
1149 struct edac_raw_error_desc *e = &mci->error_desc;
1150
1151 edac_dbg(3, "MC%d\n", mci->mc_idx);
1152
1153 /* Fills the error report buffer */
1154 memset(e, 0, sizeof(*e));
1155 e->error_count = error_count;
1156 e->top_layer = top_layer;
1157 e->mid_layer = mid_layer;
1158 e->low_layer = low_layer;
1159 e->page_frame_number = page_frame_number;
1160 e->offset_in_page = offset_in_page;
1161 e->syndrome = syndrome;
1162 e->msg = msg;
1163 e->other_detail = other_detail;
1164
1165 /*
1166 * Check if the event report is consistent and if the memory
1167 * location is known. If it is known, enable_per_layer_report will be
1168 * true, the DIMM(s) label info will be filled and the per-layer
1169 * error counters will be incremented.
1170 */
1171 for (i = 0; i < mci->n_layers; i++) {
1172 if (pos[i] >= (int)mci->layers[i].size) {
1173
1174 edac_mc_printk(mci, KERN_ERR,
1175 "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
1176 edac_layer_name[mci->layers[i].type],
1177 pos[i], mci->layers[i].size);
1178 /*
1179 * Instead of just returning it, let's use what's
1180 * known about the error. The increment routines and
1181 * the DIMM filter logic will do the right thing by
1182 * pointing the likely damaged DIMMs.
1183 */
1184 pos[i] = -1;
1185 }
1186 if (pos[i] >= 0)
1187 e->enable_per_layer_report = true;
1188 }
1189
1190 /*
1191 * Get the dimm label/grain that applies to the match criteria.
1192 * As the error algorithm may not be able to point to just one memory
1193 * stick, the logic here will get all possible labels that could
1194 * potentially be affected by the error.
1195 * On FB-DIMM memory controllers, for uncorrected errors, it is common
1196 * to have only the MC channel and the MC dimm (also called "branch")
1197 * but the channel is not known, as the memory is arranged in pairs,
1198 * where each memory belongs to a separate channel within the same
1199 * branch.
1200 */
1201 p = e->label;
1202 *p = '\0';
1203
1204 for (i = 0; i < mci->tot_dimms; i++) {
1205 struct dimm_info *dimm = mci->dimms[i];
1206
1207 if (top_layer >= 0 && top_layer != dimm->location[0])
1208 continue;
1209 if (mid_layer >= 0 && mid_layer != dimm->location[1])
1210 continue;
1211 if (low_layer >= 0 && low_layer != dimm->location[2])
1212 continue;
1213
1214 /* get the max grain, over the error match range */
1215 if (dimm->grain > e->grain)
1216 e->grain = dimm->grain;
1217
1218 /*
1219 * If the error is memory-controller wide, there's no need to
1220 * seek for the affected DIMMs because the whole
1221 * channel/memory controller/... may be affected.
1222 * Also, don't show errors for empty DIMM slots.
1223 */
1224 if (e->enable_per_layer_report && dimm->nr_pages) {
1225 if (n_labels >= EDAC_MAX_LABELS) {
1226 e->enable_per_layer_report = false;
1227 break;
1228 }
1229 n_labels++;
1230 if (p != e->label) {
1231 strcpy(p, OTHER_LABEL);
1232 p += strlen(OTHER_LABEL);
1233 }
1234 strcpy(p, dimm->label);
1235 p += strlen(p);
1236 *p = '\0';
1237
1238 /*
1239 * get csrow/channel of the DIMM, in order to allow
1240 * incrementing the compat API counters
1241 */
1242 edac_dbg(4, "%s csrows map: (%d,%d)\n",
1243 mci->csbased ? "rank" : "dimm",
1244 dimm->csrow, dimm->cschannel);
1245 if (row == -1)
1246 row = dimm->csrow;
1247 else if (row >= 0 && row != dimm->csrow)
1248 row = -2;
1249
1250 if (chan == -1)
1251 chan = dimm->cschannel;
1252 else if (chan >= 0 && chan != dimm->cschannel)
1253 chan = -2;
1254 }
1255 }
1256
1257 if (!e->enable_per_layer_report) {
1258 strcpy(e->label, "any memory");
1259 } else {
1260 edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
1261 if (p == e->label)
1262 strcpy(e->label, "unknown memory");
1263 if (type == HW_EVENT_ERR_CORRECTED) {
1264 if (row >= 0) {
1265 mci->csrows[row]->ce_count += error_count;
1266 if (chan >= 0)
1267 mci->csrows[row]->channels[chan]->ce_count += error_count;
1268 }
1269 } else if (row >= 0)
1270 mci->csrows[row]->ue_count += error_count;
1272 }
1273
1274 /* Fill the RAM location data */
1275 p = e->location;
1276
1277 for (i = 0; i < mci->n_layers; i++) {
1278 if (pos[i] < 0)
1279 continue;
1280
1281 p += sprintf(p, "%s:%d ",
1282 edac_layer_name[mci->layers[i].type],
1283 pos[i]);
1284 }
1285 if (p > e->location)
1286 *(p - 1) = '\0';
1287
1288 /* Report the error via the trace interface */
1289 grain_bits = fls_long(e->grain) + 1;
1290 trace_mc_event(type, e->msg, e->label, e->error_count,
1291 mci->mc_idx, e->top_layer, e->mid_layer, e->low_layer,
1292 (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
1293 grain_bits, e->syndrome, e->other_detail);
1294
1295 edac_raw_mc_handle_error(type, mci, e);
1296 }
1297 EXPORT_SYMBOL_GPL(edac_mc_handle_error);
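
/*
 * Editor's usage sketch (hypothetical values): a driver that decoded one
 * corrected error on channel 1, slot 2 of a two-layer controller might
 * report it as:
 *
 *	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
 *			     pfn, offset, syndrome,
 *			     1, 2, -1,		// unused layers get -1
 *			     "read error", "");
 */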