x86, MCE, AMD: Use macros to compute bank MSRs
arch/x86/kernel/cpu/mcheck/mce_amd.c
/*
 *  (c) 2005-2012 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU general public license version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Written by Jacob Shin - AMD, Inc.
 *
 *  Maintained by: Borislav Petkov <bp@alien8.de>
 *
 *  April 2006
 *     - added support for AMD Family 0x10 processors
 *  May 2012
 *     - major scrubbing
 *
 *  All MC4_MISCi registers are shared between multi-cores
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/amd_nb.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>

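/*
 * Bit fields used by this driver in the thresholding MSRs: the MASK_*_HI
 * values select bits in the high 32 bits of an MCi_MISCj register (valid,
 * counter present, locked, LVT offset, counter enable, interrupt type,
 * overflow and the 12-bit error count), MASK_BLKPTR_LO selects the block
 * pointer in the low half, and MCG_XBLK_ADDR is the MSR base of the
 * extended block area that pointer refers to.
 */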
#define NR_BLOCKS         9
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

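/*
 * Per-bank directory names exposed through sysfs; the northbridge bank's
 * sub-blocks get their own names from bank4_names() below.
 */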
static const char * const th_names[] = {
	"load_store",
	"insn_fetch",
	"combined_unit",
	"",
	"northbridge",
	"execution_unit",
};

static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned char, bank_map);	/* see which banks are on */

static void amd_threshold_interrupt(void);

/*
 * CPU Initialization
 */

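/*
 * Bundle of arguments handed to threshold_restart_bank() via
 * smp_call_function_single(): which block to reprogram, whether to reset
 * the error counter, and the (optional) LVT offset and previous limit.
 */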
struct thresh_restart {
	struct threshold_block	*b;
	int			reset;
	int			set_lvt_off;
	int			lvt_off;
	u16			old_limit;
};

static inline bool is_shared_bank(int bank)
{
	/* Bank 4 is for northbridge reporting and is thus shared */
	return (bank == 4);
}

static const char * const bank4_names(struct threshold_block *b)
{
	switch (b->address) {
	/* MSR4_MISC0 */
	case 0x00000413:
		return "dram";

	case 0xc0000408:
		return "ht_links";

	case 0xc0000409:
		return "l3_cache";

	default:
		WARN(1, "Funny MSR: 0x%08x\n", b->address);
		return "";
	}
};

static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
	/*
	 * bank 4 supports APIC LVT interrupts implicitly since forever.
	 */
	if (bank == 4)
		return true;

	/*
	 * IntP: interrupt present; if this bit is set, the thresholding
	 * bank can generate APIC LVT interrupts
	 */
	return msr_high_bits & BIT(28);
}

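/*
 * Sanity-check the APIC LVT offset: it must have been reserved (apic >= 0)
 * and must match what is already programmed into the register; otherwise
 * complain about the firmware and leave the LVT offset field untouched.
 */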
static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
	int msr = (hi & MASK_LVTOFF_HI) >> 20;

	if (apic < 0) {
		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
		       b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	if (apic != msr) {
		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	return 1;
};

/*
 * Called via smp_call_function_single(), must be called with correct
 * cpu affinity.
 */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 hi, lo;

	rdmsr(tr->b->address, lo, hi);

	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */

	if (tr->reset) {		/* reset err count and overflow bit */
		hi =
		    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		    (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (hi & THRESHOLD_MAX) +
		    (tr->old_limit - tr->b->threshold_limit);

		hi = (hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}

	/* clear IntType */
	hi &= ~MASK_INT_TYPE_HI;

	if (!tr->b->interrupt_capable)
		goto done;

	if (tr->set_lvt_off) {
		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
			/* set new lvt offset */
			hi &= ~MASK_LVTOFF_HI;
			hi |= tr->lvt_off << 20;
		}
	}

	if (tr->b->interrupt_enable)
		hi |= INT_TYPE_APIC;

 done:

	hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, lo, hi);
}

static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
	struct thresh_restart tr = {
		.b		= b,
		.set_lvt_off	= 1,
		.lvt_off	= offset,
	};

	b->threshold_limit = THRESHOLD_MAX;
	threshold_restart_bank(&tr);
};

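/*
 * Reserve the threshold LVT offset once: if nothing has been reserved yet
 * (reserved < 0), try to claim 'new' via setup_APIC_eilvt(); afterwards
 * keep returning whatever was reserved first.
 */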
static int setup_APIC_mce(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}

/* cpu init entry point, called from mce.c with preempt off */
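/*
 * Each bank has up to NR_BLOCKS thresholding blocks: block 0 lives in
 * MCi_MISC (computed via MSR_IA32_MCx_MISC(bank)), block 1 is located
 * through the BLKPTR field relative to MCG_XBLK_ADDR, and any further
 * blocks follow at consecutive MSR addresses.
 */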
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	struct threshold_block b;
	unsigned int cpu = smp_processor_id();
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	int offset = -1;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		for (block = 0; block < NR_BLOCKS; ++block) {
			if (block == 0)
				address = MSR_IA32_MCx_MISC(bank);
			else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;

				address += MCG_XBLK_ADDR;
			} else
				++address;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI))
				continue;

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			if (!block)
				per_cpu(bank_map, cpu) |= (1 << bank);

			memset(&b, 0, sizeof(b));
			b.cpu			= cpu;
			b.bank			= bank;
			b.block			= block;
			b.address		= address;
			b.interrupt_capable	= lvt_interrupt_supported(bank, high);

			if (b.interrupt_capable) {
				int new = (high & MASK_LVTOFF_HI) >> 20;
				offset = setup_APIC_mce(offset, new);
			}

			mce_threshold_block_init(&b, offset);
			mce_threshold_vector = amd_threshold_interrupt;
		}
	}
}

/*
 * APIC Interrupt Handler
 */

/*
 * The threshold interrupt handler services THRESHOLD_APIC_VECTOR. The
 * interrupt fires once error_count reaches threshold_limit; the handler
 * simply logs an MCE with a software-defined bank number.
 */
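/*
 * The hardware does not report which counter overflowed, so walk all
 * known banks and blocks and look for the one with MASK_OVERFLOW_HI set.
 */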
static void amd_threshold_interrupt(void)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	struct mce m;

	mce_setup(&m);

	/* assume first bank caused it */
	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, m.cpu) & (1 << bank)))
			continue;
		for (block = 0; block < NR_BLOCKS; ++block) {
			if (block == 0) {
				address = MSR_IA32_MCx_MISC(bank);
			} else if (block == 1) {
				address = (low & MASK_BLKPTR_LO) >> 21;
				if (!address)
					break;
				address += MCG_XBLK_ADDR;
			} else {
				++address;
			}

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			if (!(high & MASK_CNTP_HI)  ||
			     (high & MASK_LOCKED_HI))
				continue;

			/*
			 * Log the machine check that caused the threshold
			 * event.
			 */
			machine_check_poll(MCP_TIMESTAMP,
					this_cpu_ptr(&mce_poll_banks));

			if (high & MASK_OVERFLOW_HI) {
				rdmsrl(address, m.misc);
				rdmsrl(MSR_IA32_MCx_STATUS(bank), m.status);
				m.bank = K8_MCE_THRESHOLD_BASE
				       + bank * NR_BLOCKS
				       + block;
				mce_log(&m);
				return;
			}
		}
	}
}

/*
 * Sysfs Interface
 */

struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

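/*
 * SHOW_FIELDS() stamps out a trivial sysfs show routine for an integer
 * member of struct threshold_block; RW_ATTR() further below pairs such a
 * routine with its store counterpart in a threshold_attr.
 */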
#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (!b->interrupt_capable)
		return -EINVAL;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	b->interrupt_enable = !!new;

	memset(&tr, 0, sizeof(tr));
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	memset(&tr, 0, sizeof(tr));
	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}

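/*
 * The hardware counter is preloaded with THRESHOLD_MAX - threshold_limit
 * (see threshold_restart_bank()), so the number of errors seen so far is
 * the current count minus that starting value.
 */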
static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	u32 lo, hi;

	rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);

	return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
				     (THRESHOLD_MAX - b->threshold_limit)));
}

static struct threshold_attr error_count = {
	.attr = {.name = __stringify(error_count), .mode = 0444 },
	.show = show_error_count,
};

#define RW_ATTR(val)							\
static struct threshold_attr val = {					\
	.attr	= {.name = __stringify(val), .mode = 0644 },		\
	.show	= show_## val,						\
	.store	= store_## val,						\
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);

static struct attribute *default_attrs[] = {
	&threshold_limit.attr,
	&error_count.attr,
	NULL,	/* possibly interrupt_enable if supported, see below */
	NULL,
};

#define to_block(k)	container_of(k, struct threshold_block, kobj)
#define to_attr(a)	container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}

static const struct sysfs_ops threshold_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type threshold_ktype = {
	.sysfs_ops	= &threshold_ops,
	.default_attrs	= default_attrs,
};

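/*
 * Recursively allocate one threshold_block per valid block of a bank and
 * hook it up under the bank's kobject: block 0 is probed first, then the
 * routine follows the block pointer / consecutive addresses just like
 * mce_amd_feature_init() does, calling itself with ++block.
 */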
static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
				     unsigned int block, u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI)  ||
	     (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block		= block;
	b->bank			= bank;
	b->cpu			= cpu;
	b->address		= address;
	b->interrupt_enable	= 0;
	b->interrupt_capable	= lvt_interrupt_supported(bank, high);
	b->threshold_limit	= THRESHOLD_MAX;

	if (b->interrupt_capable)
		threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
	else
		threshold_ktype.default_attrs[2] = NULL;

	INIT_LIST_HEAD(&b->miscj);

	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	} else {
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
	}

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
				   (bank == 4 ? bank4_names(b) : th_names[bank]));
	if (err)
		goto out_free;
recurse:
	if (!block) {
		address = (low & MASK_BLKPTR_LO) >> 21;
		if (!address)
			return 0;
		address += MCG_XBLK_ADDR;
	} else {
		++address;
	}

	err = allocate_threshold_blocks(cpu, bank, ++block, address);
	if (err)
		goto out_free;

	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		list_del(&b->miscj);
		kfree(b);
	}
	return err;
}

static int __threshold_add_blocks(struct threshold_bank *b)
{
	struct list_head *head = &b->blocks->miscj;
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	int err = 0;

	err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
	if (err)
		return err;

	list_for_each_entry_safe(pos, tmp, head, miscj) {

		err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
		if (err) {
			list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
				kobject_del(&pos->kobj);

			return err;
		}
	}
	return err;
}

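/*
 * Create the sysfs hierarchy for one bank of a CPU. The shared
 * northbridge bank (bank 4) is allocated only once per node and then
 * reference-counted: additional CPUs on the node just re-add the existing
 * kobjects via __threshold_add_blocks().
 */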
static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	struct device *dev = per_cpu(mce_device, cpu);
	struct amd_northbridge *nb = NULL;
	struct threshold_bank *b = NULL;
	const char *name = th_names[bank];
	int err = 0;

	if (is_shared_bank(bank)) {
		nb = node_to_amd_nb(amd_get_nb_id(cpu));

		/* threshold descriptor already initialized on this node? */
		if (nb && nb->bank4) {
			/* yes, use it */
			b = nb->bank4;
			err = kobject_add(b->kobj, &dev->kobj, name);
			if (err)
				goto out;

			per_cpu(threshold_banks, cpu)[bank] = b;
			atomic_inc(&b->cpus);

			err = __threshold_add_blocks(b);

			goto out;
		}
	}

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &dev->kobj);
	if (!b->kobj) {
		err = -EINVAL;
		goto out_free;
	}

	per_cpu(threshold_banks, cpu)[bank] = b;

	if (is_shared_bank(bank)) {
		atomic_set(&b->cpus, 1);

		/* nb is already initialized, see above */
		if (nb) {
			WARN_ON(nb->bank4);
			nb->bank4 = b;
		}
	}

	err = allocate_threshold_blocks(cpu, bank, 0, MSR_IA32_MCx_MISC(bank));
	if (!err)
		goto out;

 out_free:
	kfree(b);

 out:
	return err;
}

/* create dir/files for all valid threshold banks */
static int threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	struct threshold_bank **bp;
	int err = 0;

	bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
		     GFP_KERNEL);
	if (!bp)
		return -ENOMEM;

	per_cpu(threshold_banks, cpu) = bp;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			return err;
	}

	return err;
}

static void deallocate_threshold_block(unsigned int cpu,
				       unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

static void __threshold_remove_blocks(struct threshold_bank *b)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;

	kobject_del(b->kobj);

	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
		kobject_del(&pos->kobj);
}

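/*
 * Tear down a bank's sysfs entries. For the shared northbridge bank only
 * the last CPU on the node frees the descriptor; earlier CPUs merely drop
 * their kobject links and decrement the refcount.
 */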
static void threshold_remove_bank(unsigned int cpu, int bank)
{
	struct amd_northbridge *nb;
	struct threshold_bank *b;

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;

	if (!b->blocks)
		goto free_out;

	if (is_shared_bank(bank)) {
		if (!atomic_dec_and_test(&b->cpus)) {
			__threshold_remove_blocks(b);
			per_cpu(threshold_banks, cpu)[bank] = NULL;
			return;
		} else {
			/*
			 * the last CPU on this node using the shared bank is
			 * going away, remove that bank now.
			 */
			nb = node_to_amd_nb(amd_get_nb_id(cpu));
			nb->bank4 = NULL;
		}
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}

static void threshold_remove_device(unsigned int cpu)
{
	unsigned int bank;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		threshold_remove_bank(cpu, bank);
	}
	kfree(per_cpu(threshold_banks, cpu));
}

/* get notified when a cpu comes on/off */
static void
amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		threshold_create_device(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		threshold_remove_device(cpu);
		break;
	default:
		break;
	}
}

static __init int threshold_init_device(void)
{
	unsigned lcpu = 0;

	/* to hit CPUs online before the notifier is up */
	for_each_online_cpu(lcpu) {
		int err = threshold_create_device(lcpu);

		if (err)
			return err;
	}
	threshold_cpu_callback = amd_64_threshold_cpu_callback;

	return 0;
}
/*
 * There are three functions which need to be _initcalled in a logical
 * sequence:
 * 1. xen_late_init_mcelog
 * 2. mcheck_init_device
 * 3. threshold_init_device
 *
 * xen_late_init_mcelog must register xen_mce_chrdev_device before
 * native mce_chrdev_device registration when running under the xen
 * platform;
 *
 * mcheck_init_device must run before threshold_init_device so that
 * mce_device is initialized; otherwise a NULL pointer dereference will
 * cause a panic.
 *
 * So we use the following _initcalls:
 * 1. device_initcall(xen_late_init_mcelog);
 * 2. device_initcall_sync(mcheck_init_device);
 * 3. late_initcall(threshold_init_device);
 *
 * When running under xen, the initcall order is 1, 2, 3;
 * on baremetal, 1 is skipped and only 2 and 3 run.
 */
late_initcall(threshold_init_device);