
/*
 * edac_device.c
 * (C) 2007 www.douglaskthompson.com
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Doug Thompson <norsk5@xmission.com>
 *
 * edac_device API implementation
 * 19 Jan 2007
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <asm/uaccess.h>
#include <asm/page.h>

#include "edac_core.h"
#include "edac_module.h"

/* lock for the list: 'edac_device_list', manipulation of this list
 * is protected by the 'device_ctls_mutex' lock
 */
static DEFINE_MUTEX(device_ctls_mutex);
static LIST_HEAD(edac_device_list);

#ifdef CONFIG_EDAC_DEBUG
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
	debugf3("\tedac_dev = %p dev_idx=%d\n", edac_dev, edac_dev->dev_idx);
	debugf4("\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
	debugf3("\tdev = %p\n", edac_dev->dev);
	debugf3("\tmod_name:ctl_name = %s:%s\n",
		edac_dev->mod_name, edac_dev->ctl_name);
	debugf3("\tpvt_info = %p\n\n", edac_dev->pvt_info);
}
#endif				/* CONFIG_EDAC_DEBUG */

/*
 * edac_device_alloc_ctl_info()
 *	Allocate a new edac device control info structure
 *
 *	The control structure is allocated in one complete chunk
 *	from the OS. It is in turn sub-allocated to the
 *	various objects that compose the structure
 *
 *	The structure has a 'nr_instances' array within itself.
 *	Each instance represents a major component
 *	Example: L1 cache and L2 cache are 2 instance components
 *
 *	Within each instance is an array of 'nr_blocks' blocks.
 */
struct edac_device_ctl_info *edac_device_alloc_ctl_info(
	unsigned sz_private,
	char *edac_device_name, unsigned nr_instances,
	char *edac_block_name, unsigned nr_blocks,
	unsigned offset_value,		/* zero, 1, or other based offset */
	struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib,
	int device_index)
{
	struct edac_device_ctl_info *dev_ctl;
	struct edac_device_instance *dev_inst, *inst;
	struct edac_device_block *dev_blk, *blk_p, *blk;
	struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
	unsigned total_size;
	unsigned count;
	unsigned instance, block, attr;
	void *pvt, *p;
	int err;

	debugf4("%s() instances=%d blocks=%d\n",
		__func__, nr_instances, nr_blocks);

	/* Calculate the size of memory we need to allocate AND
	 * determine the offsets of the various item arrays
	 * (instance,block,attrib) from the start of an allocated structure.
	 * We want the alignment of each item (instance,block,attrib)
	 * to be at least as stringent as what the compiler would
	 * provide if we could simply hardcode everything into a single struct.
	 */
	p = NULL;
	dev_ctl = edac_align_ptr(&p, sizeof(*dev_ctl), 1);

	/* Calc the 'end' offset past end of ONE ctl_info structure
	 * which will become the start of the 'instance' array
	 */
	dev_inst = edac_align_ptr(&p, sizeof(*dev_inst), nr_instances);

	/* Calc the 'end' offset past the instance array within the ctl_info
	 * which will become the start of the block array
	 */
	count = nr_instances * nr_blocks;
	dev_blk = edac_align_ptr(&p, sizeof(*dev_blk), count);

	/* Calc the 'end' offset past the dev_blk array
	 * which will become the start of the attrib array, if any.
	 */
	/* calc how many nr_attrib we need */
	if (nr_attrib > 0)
		count *= nr_attrib;
	dev_attrib = edac_align_ptr(&p, sizeof(*dev_attrib), count);

	/* Calc the 'end' offset past the attributes array */
	pvt = edac_align_ptr(&p, sz_private, 1);

	/* 'pvt' now points to where the private data area is.
	 * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
	 * is baselined at ZERO
	 */
	total_size = ((unsigned long)pvt) + sz_private;

	/* Allocate the amount of memory for the set of control structures */
	dev_ctl = kzalloc(total_size, GFP_KERNEL);
	if (dev_ctl == NULL)
		return NULL;

	/* Adjust pointers so they point within the actual memory we
	 * just allocated rather than an imaginary chunk of memory
	 * located at address 0.
	 * 'dev_ctl' points to REAL memory, while the others are
	 * ZERO based and thus need to be adjusted to point within
	 * the allocated memory.
	 */
	dev_inst = (struct edac_device_instance *)
		(((char *)dev_ctl) + ((unsigned long)dev_inst));
	dev_blk = (struct edac_device_block *)
		(((char *)dev_ctl) + ((unsigned long)dev_blk));
	dev_attrib = (struct edac_dev_sysfs_block_attribute *)
		(((char *)dev_ctl) + ((unsigned long)dev_attrib));
	pvt = sz_private ? (((char *)dev_ctl) + ((unsigned long)pvt)) : NULL;
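
	/* Worked example of the layout trick above (illustrative sizes only;
	 * the real offsets come from edac_align_ptr()): with nr_instances = 2,
	 * nr_blocks = 2 and no attributes, the single allocation looks like
	 *
	 *	offset 0 : struct edac_device_ctl_info
	 *	offset X : dev_inst[0..1]	(X = aligned sizeof(*dev_ctl))
	 *	offset Y : dev_blk[0..3]	(Y = X + aligned instance array)
	 *	offset Z : private data		(Z = Y + aligned block array)
	 *
	 * The first pass computed X, Y and Z relative to NULL; adding the
	 * kzalloc()ed base pointer above turned them into real addresses.
	 */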

	/* Begin storing the information into the control info structure */
	dev_ctl->dev_idx = device_index;
	dev_ctl->nr_instances = nr_instances;
	dev_ctl->instances = dev_inst;
	dev_ctl->pvt_info = pvt;

	/* Default logging of CEs and UEs */
	dev_ctl->log_ce = 1;
	dev_ctl->log_ue = 1;

	/* Name of this edac device */
	snprintf(dev_ctl->name, sizeof(dev_ctl->name), "%s", edac_device_name);

	debugf4("%s() edac_dev=%p next after end=%p\n",
		__func__, dev_ctl, pvt + sz_private);

	/* Initialize every Instance */
	for (instance = 0; instance < nr_instances; instance++) {
		inst = &dev_inst[instance];
		inst->ctl = dev_ctl;
		inst->nr_blocks = nr_blocks;
		blk_p = &dev_blk[instance * nr_blocks];
		inst->blocks = blk_p;

		/* name of this instance */
		snprintf(inst->name, sizeof(inst->name),
			 "%s%u", edac_device_name, instance);

		/* Initialize every block in each instance */
		for (block = 0; block < nr_blocks; block++) {
			blk = &blk_p[block];
			blk->instance = inst;
			snprintf(blk->name, sizeof(blk->name),
				 "%s%d", edac_block_name, block+offset_value);

			debugf4("%s() instance=%d inst_p=%p block=#%d "
				"block_p=%p name='%s'\n",
				__func__, instance, inst, block,
				blk, blk->name);

			/* if there are NO attributes OR no attribute pointer
			 * then continue on to next block iteration
			 */
			if ((nr_attrib == 0) || (attrib_spec == NULL))
				continue;

			/* setup the attribute array for this block */
			blk->nr_attribs = nr_attrib;
			attrib_p = &dev_attrib[block*nr_instances*nr_attrib];
			blk->block_attributes = attrib_p;

			debugf4("%s() THIS BLOCK_ATTRIB=%p\n",
				__func__, blk->block_attributes);

			/* Initialize every user specified attribute in this
			 * block with the data the caller passed in
			 * Each block gets its own copy of pointers,
			 * and its unique 'value'
			 */
			for (attr = 0; attr < nr_attrib; attr++) {
				attrib = &attrib_p[attr];

				/* populate the unique per attrib
				 * with the code pointers and info
				 */
				attrib->attr = attrib_spec[attr].attr;
				attrib->show = attrib_spec[attr].show;
				attrib->store = attrib_spec[attr].store;

				attrib->block = blk;	/* up link */

				debugf4("%s() alloc-attrib=%p attrib_name='%s' "
					"attrib-spec=%p spec-name=%s\n",
					__func__, attrib, attrib->attr.name,
					&attrib_spec[attr],
					attrib_spec[attr].attr.name);
			}
		}
	}

	/* Mark this instance as merely ALLOCATED */
	dev_ctl->op_state = OP_ALLOC;

	/*
	 * Initialize the 'root' kobj for the edac_device controller
	 */
	err = edac_device_register_sysfs_main_kobj(dev_ctl);
	if (err) {
		kfree(dev_ctl);
		return NULL;
	}

	/* at this point, the root kobj is valid, and in order to
	 * 'free' the object, then the function:
	 *	edac_device_unregister_sysfs_main_kobj() must be called
	 * which will perform kobj unregistration and the actual free
	 * will occur during the kobject callback operation
	 */

	return dev_ctl;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
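
/*
 * Usage sketch (illustrative only; the driver names and private type are
 * hypothetical): a driver for an "l2cache" error device with one instance,
 * two blocks and no extra sysfs attributes might allocate its control
 * structure roughly like this:
 *
 *	struct edac_device_ctl_info *edac_dev;
 *
 *	edac_dev = edac_device_alloc_ctl_info(sizeof(struct my_pvt),
 *					      "l2cache", 1, "bank", 2,
 *					      0, NULL, 0,
 *					      edac_device_alloc_index());
 *	if (!edac_dev)
 *		return -ENOMEM;
 *
 * The 'struct my_pvt' storage is carved out of the same allocation and is
 * reachable through edac_dev->pvt_info.  The matching release path is
 * edac_device_free_ctl_info(edac_dev).
 */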

/*
 * edac_device_free_ctl_info()
 *	frees the memory allocated by the edac_device_alloc_ctl_info()
 *	function
 */
void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
{
	edac_device_unregister_sysfs_main_kobj(ctl_info);
}
EXPORT_SYMBOL_GPL(edac_device_free_ctl_info);

/*
 * find_edac_device_by_dev
 *	scans the edac_device list for a specific 'struct device *'
 *
 *	lock to be held prior to call: device_ctls_mutex
 *
 *	Return:
 *		pointer to control structure managing 'dev'
 *		NULL if not found on list
 */
static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
{
	struct edac_device_ctl_info *edac_dev;
	struct list_head *item;

	debugf0("%s()\n", __func__);

	list_for_each(item, &edac_device_list) {
		edac_dev = list_entry(item, struct edac_device_ctl_info, link);

		if (edac_dev->dev == dev)
			return edac_dev;
	}

	return NULL;
}

/*
 * add_edac_dev_to_global_list
 *	Before calling this function, caller must
 *	assign a unique value to edac_dev->dev_idx.
 *
 *	lock to be held prior to call: device_ctls_mutex
 *
 *	Return:
 *		0 on success
 *		1 on failure.
 */
static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
{
	struct list_head *item, *insert_before;
	struct edac_device_ctl_info *rover;

	insert_before = &edac_device_list;

	/* Determine if already on the list */
	rover = find_edac_device_by_dev(edac_dev->dev);
	if (unlikely(rover != NULL))
		goto fail0;

	/* Insert in ascending order by 'dev_idx', so find position */
	list_for_each(item, &edac_device_list) {
		rover = list_entry(item, struct edac_device_ctl_info, link);

		if (rover->dev_idx >= edac_dev->dev_idx) {
			if (unlikely(rover->dev_idx == edac_dev->dev_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&edac_dev->link, insert_before);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
			"%s (%s) %s %s already assigned %d\n",
			dev_name(rover->dev), edac_dev_name(rover),
			rover->mod_name, rover->ctl_name, rover->dev_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
			"bug in low-level driver: attempt to assign\n"
			"    duplicate dev_idx %d in %s()\n", rover->dev_idx,
			__func__);
	return 1;
}

/*
 * del_edac_device_from_global_list
 */
static void del_edac_device_from_global_list(struct edac_device_ctl_info
						*edac_device)
{
	list_del_rcu(&edac_device->link);

	/* these are for safe removal of devices from the global list while
	 * NMI handlers may be traversing the list
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&edac_device->link);
}

/*
 * edac_device_workq_function
 *	performs the operation scheduled by a workq request
 *
 *	this workq is embedded within an edac_device_ctl_info
 *	structure, that needs to be polled for possible error events.
 *
 *	This operation is to acquire the list mutex lock
 *	(thus preventing insertion or deletion)
 *	and then call the device's poll function IFF this device is
 *	running polled and there is a poll function defined.
 */
static void edac_device_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);

	mutex_lock(&device_ctls_mutex);

	/* If we are being removed, bail out immediately */
	if (edac_dev->op_state == OP_OFFLINE) {
		mutex_unlock(&device_ctls_mutex);
		return;
	}

	/* Only poll controllers that are running polled and have a check */
	if ((edac_dev->op_state == OP_RUNNING_POLL) &&
	    (edac_dev->edac_check != NULL)) {
		edac_dev->edac_check(edac_dev);
	}

	mutex_unlock(&device_ctls_mutex);

	/* Reschedule the workq for the next time period.
	 * If the poll interval is exactly one second, round to the next
	 * whole second so these timers fire together instead of scattering
	 * across the period between integral seconds.
	 */
	if (edac_dev->poll_msec == 1000)
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				round_jiffies_relative(edac_dev->delay));
	else
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				edac_dev->delay);
}

/*
 * edac_device_workq_setup
 *	initialize a workq item for this edac_device instance
 *	passing in the new delay period in msec
 */
void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
				unsigned msec)
{
	debugf0("%s()\n", __func__);

	/* take the arg 'msec' and store it in the control structure for
	 * use in the time period calculation,
	 * then convert it to the number of jiffies it represents
	 */
	edac_dev->poll_msec = msec;
	edac_dev->delay = msecs_to_jiffies(msec);

	INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);

	/* optimize here for the 1 second case, which will be the normal
	 * value, to fire ON the 1 second time event. This helps reduce
	 * all sorts of timers firing on sub-second boundaries, while they
	 * are happy to fire together exactly on the 1 second boundary.
	 */
	if (edac_dev->poll_msec == 1000)
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				round_jiffies_relative(edac_dev->delay));
	else
		queue_delayed_work(edac_workqueue, &edac_dev->work,
				edac_dev->delay);
}

/*
 * edac_device_workq_teardown
 *	stop the workq processing on this edac_dev
 */
void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
{
	int status;

	status = cancel_delayed_work(&edac_dev->work);
	if (status == 0) {
		/* workq instance might be running, wait for it */
		flush_workqueue(edac_workqueue);
	}
}

/*
 * edac_device_reset_delay_period
 *
 *	need to stop any outstanding workq queued up at this time
 *	because we will be resetting the sleep time.
 *	Then restart the workq on the new delay
 */
void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
					unsigned long value)
{
	/* cancel the current workq request, without the mutex lock */
	edac_device_workq_teardown(edac_dev);

	/* acquire the mutex before doing the workq setup */
	mutex_lock(&device_ctls_mutex);

	/* restart the workq request, with new delay value */
	edac_device_workq_setup(edac_dev, value);

	mutex_unlock(&device_ctls_mutex);
}

/*
 * edac_device_alloc_index: Allocate a unique device index number
 *
 * Return:
 *	allocated index number
 */
int edac_device_alloc_index(void)
{
	static atomic_t device_indexes = ATOMIC_INIT(0);

	return atomic_inc_return(&device_indexes) - 1;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_index);
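
/*
 * Illustrative note (not part of the original comments): the static counter
 * above is shared by all callers, so the first call returns 0, the next 1,
 * and so on.  A driver probing several identical devices would typically do
 *
 *	int idx = edac_device_alloc_index();
 *
 * once per device and pass 'idx' as the 'device_index' argument of
 * edac_device_alloc_ctl_info() so that each control structure receives a
 * unique dev_idx for the global list ordering.
 */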

/**
 * edac_device_add_device: Insert the 'edac_dev' structure into the
 *	edac_device global list and create sysfs entries associated with
 *	the edac_device structure.
 * @edac_dev: pointer to the edac_device structure to be added to the list
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */
int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
{
	debugf0("%s()\n", __func__);

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_device_dump_device(edac_dev);
#endif
	mutex_lock(&device_ctls_mutex);

	if (add_edac_dev_to_global_list(edac_dev))
		goto fail0;

	/* set load time so that error rate can be tracked */
	edac_dev->start_time = jiffies;

	/* create this instance's sysfs entries */
	if (edac_device_create_sysfs(edac_dev)) {
		edac_device_printk(edac_dev, KERN_WARNING,
					"failed to create sysfs device\n");
		goto fail1;
	}

	/* If there IS a check routine, then we are running POLLED */
	if (edac_dev->edac_check != NULL) {
		/* This instance is NOW RUNNING */
		edac_dev->op_state = OP_RUNNING_POLL;

		/*
		 * enable workq processing on this instance,
		 * default = 1000 msec
		 */
		edac_device_workq_setup(edac_dev, 1000);
	} else {
		edac_dev->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_device_printk(edac_dev, KERN_INFO,
				"Giving out device to module '%s' controller "
				"'%s': DEV '%s' (%s)\n",
				edac_dev->mod_name,
				edac_dev->ctl_name,
				edac_dev_name(edac_dev),
				edac_op_state_to_string(edac_dev->op_state));

	mutex_unlock(&device_ctls_mutex);
	return 0;

fail1:
	/* Some error, so remove the entry from the list */
	del_edac_device_from_global_list(edac_dev);

fail0:
	mutex_unlock(&device_ctls_mutex);
	return 1;
}
EXPORT_SYMBOL_GPL(edac_device_add_device);
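
/*
 * Registration sketch (illustrative; the driver names, 'pdev' and the check
 * callback are hypothetical): after allocation, a driver fills in the
 * identification fields and an optional polled-mode check routine, then
 * registers the device:
 *
 *	edac_dev->dev = &pdev->dev;
 *	edac_dev->mod_name = "my_edac_driver";
 *	edac_dev->ctl_name = "l2cache";
 *	edac_dev->dev_name = dev_name(&pdev->dev);
 *	edac_dev->edac_check = my_edac_check;	(omit for interrupt mode)
 *
 *	if (edac_device_add_device(edac_dev)) {
 *		edac_device_free_ctl_info(edac_dev);
 *		return -ENODEV;
 *	}
 *
 * With edac_check set, the core polls the device every 1000 msec
 * (OP_RUNNING_POLL); without it, the driver is expected to report errors
 * from its own interrupt handler (OP_RUNNING_INTERRUPT).
 */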

/**
 * edac_device_del_device:
 *	Remove sysfs entries for specified edac_device structure and
 *	then remove edac_device structure from global list
 *
 * @dev:
 *	Pointer to 'struct device' representing edac_device
 *	structure to remove.
 *
 * Return:
 *	Pointer to removed edac_device structure,
 *	OR NULL if device not found.
 */
struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
{
	struct edac_device_ctl_info *edac_dev;

	debugf0("%s()\n", __func__);

	mutex_lock(&device_ctls_mutex);

	/* Find the structure on the list, if not there, then leave */
	edac_dev = find_edac_device_by_dev(dev);
	if (edac_dev == NULL) {
		mutex_unlock(&device_ctls_mutex);
		return NULL;
	}

	/* mark this instance as OFFLINE */
	edac_dev->op_state = OP_OFFLINE;

	/* deregister from global list */
	del_edac_device_from_global_list(edac_dev);

	mutex_unlock(&device_ctls_mutex);

	/* clear workq processing on this instance */
	edac_device_workq_teardown(edac_dev);

	/* Tear down the sysfs entries for this instance */
	edac_device_remove_sysfs(edac_dev);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n",
		edac_dev->dev_idx,
		edac_dev->mod_name, edac_dev->ctl_name, edac_dev_name(edac_dev));

	return edac_dev;
}
EXPORT_SYMBOL_GPL(edac_device_del_device);

static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev)
{
	return edac_dev->log_ce;
}

static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev)
{
	return edac_dev->log_ue;
}

static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
					*edac_dev)
{
	return edac_dev->panic_on_ue;
}

/*
 * edac_device_handle_ce
 *	perform a common output and handling of an 'edac_dev' CE event
 */
void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
			int inst_nr, int block_nr, const char *msg)
{
	struct edac_device_instance *instance;
	struct edac_device_block *block = NULL;

	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: 'instance' out of range "
				"(%d >= %d)\n", inst_nr,
				edac_dev->nr_instances);
		return;
	}

	instance = edac_dev->instances + inst_nr;

	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: instance %d 'block' "
				"out of range (%d >= %d)\n",
				inst_nr, block_nr,
				instance->nr_blocks);
		return;
	}

	if (instance->nr_blocks > 0) {
		block = instance->blocks + block_nr;
		block->counters.ce_count++;
	}

	/* Propagate the count up the 'totals' tree */
	instance->counters.ce_count++;
	edac_dev->counters.ce_count++;

	if (edac_device_get_log_ce(edac_dev))
		edac_device_printk(edac_dev, KERN_WARNING,
				"CE: %s instance: %s block: %s '%s'\n",
				edac_dev->ctl_name, instance->name,
				block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ce);

/*
 * edac_device_handle_ue
 *	perform a common output and handling of an 'edac_dev' UE event
 */
void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
			int inst_nr, int block_nr, const char *msg)
{
	struct edac_device_instance *instance;
	struct edac_device_block *block = NULL;

	if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: 'instance' out of range "
				"(%d >= %d)\n", inst_nr,
				edac_dev->nr_instances);
		return;
	}

	instance = edac_dev->instances + inst_nr;

	if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
		edac_device_printk(edac_dev, KERN_ERR,
				"INTERNAL ERROR: instance %d 'block' "
				"out of range (%d >= %d)\n",
				inst_nr, block_nr,
				instance->nr_blocks);
		return;
	}

	if (instance->nr_blocks > 0) {
		block = instance->blocks + block_nr;
		block->counters.ue_count++;
	}

	/* Propagate the count up the 'totals' tree */
	instance->counters.ue_count++;
	edac_dev->counters.ue_count++;

	if (edac_device_get_log_ue(edac_dev))
		edac_device_printk(edac_dev, KERN_EMERG,
				"UE: %s instance: %s block: %s '%s'\n",
				edac_dev->ctl_name, instance->name,
				block ? block->name : "N/A", msg);

	if (edac_device_get_panic_on_ue(edac_dev))
		panic("EDAC %s: UE instance: %s block %s '%s'\n",
			edac_dev->ctl_name, instance->name,
			block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ue);
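
/*
 * Error-reporting sketch (illustrative; the callback and status-read helpers
 * are hypothetical): a polled driver's edac_check callback, or an interrupt
 * handler, reports events through the two helpers above, identifying the
 * instance and block by index:
 *
 *	static void my_edac_check(struct edac_device_ctl_info *edac_dev)
 *	{
 *		if (my_read_ce_status())
 *			edac_device_handle_ce(edac_dev, 0, 0,
 *					      "single-bit error");
 *		if (my_read_ue_status())
 *			edac_device_handle_ue(edac_dev, 0, 0,
 *					      "double-bit error");
 *	}
 *
 * Both helpers bump the per-block, per-instance and per-device counters and
 * log the event; edac_device_handle_ue() additionally panics when the
 * panic_on_ue control is set.
 */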