Merge branch 'linus' into x86/xsave
[deliverable/linux.git] / arch / sparc64 / kernel / traps.c
1 /* arch/sparc64/kernel/traps.c
2 *
3 * Copyright (C) 1995,1997,2008 David S. Miller (davem@davemloft.net)
4 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
5 */
6
7 /*
8 * I like traps on v9, :))))
9 */
10
11 #include <linux/module.h>
12 #include <linux/sched.h>
13 #include <linux/linkage.h>
14 #include <linux/kernel.h>
15 #include <linux/signal.h>
16 #include <linux/smp.h>
17 #include <linux/mm.h>
18 #include <linux/init.h>
19 #include <linux/kdebug.h>
20
21 #include <asm/smp.h>
22 #include <asm/delay.h>
23 #include <asm/system.h>
24 #include <asm/ptrace.h>
25 #include <asm/oplib.h>
26 #include <asm/page.h>
27 #include <asm/pgtable.h>
28 #include <asm/unistd.h>
29 #include <asm/uaccess.h>
30 #include <asm/fpumacro.h>
31 #include <asm/lsu.h>
32 #include <asm/dcu.h>
33 #include <asm/estate.h>
34 #include <asm/chafsr.h>
35 #include <asm/sfafsr.h>
36 #include <asm/psrcompat.h>
37 #include <asm/processor.h>
38 #include <asm/timer.h>
39 #include <asm/head.h>
40 #include <asm/prom.h>
41
42 #include "entry.h"
43 #include "kstack.h"
44
45 /* When an irrecoverable trap occurs at tl > 0, the trap entry
46 * code logs the trap state registers at every level in the trap
47 * stack. It is found at (pt_regs + sizeof(pt_regs)) and the layout
48 * is as follows:
49 */
/* Trap-state snapshot saved by the irrecoverable-trap entry code at
 * (pt_regs + sizeof(pt_regs)); one trapstack entry per trap level.
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;	/* %tstate at this trap level */
		unsigned long tpc;	/* trap PC */
		unsigned long tnpc;	/* trap next-PC */
		unsigned long tt;	/* trap type */
	} trapstack[4];
	unsigned long tl;	/* trap level the error occurred at */
};
59
60 static void dump_tl1_traplog(struct tl1_traplog *p)
61 {
62 int i, limit;
63
64 printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
65 "dumping track stack.\n", p->tl);
66
67 limit = (tlb_type == hypervisor) ? 2 : 4;
68 for (i = 0; i < limit; i++) {
69 printk(KERN_EMERG
70 "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
71 "TNPC[%016lx] TT[%lx]\n",
72 i + 1,
73 p->trapstack[i].tstate, p->trapstack[i].tpc,
74 p->trapstack[i].tnpc, p->trapstack[i].tt);
75 printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
76 }
77 }
78
/* Handler for otherwise-unhandled trap vectors.  Hardware traps
 * (lvl < 0x100) and kernel-mode software traps are fatal; user-mode
 * software traps become SIGILL/ILL_ILLTRP with si_trapno set.
 */
void bad_trap(struct pt_regs *regs, long lvl)
{
	char buffer[32];
	siginfo_t info;

	/* Give debugger/notifier hooks a chance to claim the trap. */
	if (notify_die(DIE_TRAP, "bad trap", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	if (lvl < 0x100) {
		/* Hardware trap levels should never reach here. */
		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
		die_if_kernel(buffer, regs);
	}

	/* Software trap numbers are offset by 0x100 in the trap table. */
	lvl -= 0x100;
	if (regs->tstate & TSTATE_PRIV) {
		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
		die_if_kernel(buffer, regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		/* Compat task: program counters are 32-bit. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = lvl;
	force_sig_info(SIGILL, &info, current);
}
109
110 void bad_trap_tl1(struct pt_regs *regs, long lvl)
111 {
112 char buffer[32];
113
114 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
115 0, lvl, SIGTRAP) == NOTIFY_STOP)
116 return;
117
118 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
119
120 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
121 die_if_kernel (buffer, regs);
122 }
123
124 #ifdef CONFIG_DEBUG_BUGVERBOSE
/* Verbose BUG() reporting hook: unblank the console and print the
 * source location before the trap machinery takes over.
 */
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
130 #endif
131
/* Spitfire instruction access exception (trap type 0x8).  Fatal in
 * kernel mode; user tasks get SIGSEGV/SEGV_MAPERR at the faulting PC.
 *
 * @sfsr: Synchronous Fault Status register value
 * @sfar: Synchronous Fault Address register value
 */
void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Kernel-mode instruction fault: log and die. */
		printk("spitfire_insn_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Iax", regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		/* Compat task: program counters are 32-bit. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
156
/* Same fault taken at trap level > 0: dump the saved trap stack
 * first, then handle exactly as at tl0.
 */
void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_insn_access_exception(regs, sfsr, sfar);
}
166
/* sun4v (hypervisor) instruction access exception.
 *
 * @addr:     faulting address reported by the hypervisor
 * @type_ctx: fault type in the high 16 bits, MMU context in the
 *            low 16 bits
 */
void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Kernel-mode instruction fault: log and die. */
		printk("sun4v_insn_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Iax", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		/* Compat task: program counters are 32-bit. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
195
/* sun4v instruction access exception at trap level > 0: dump the
 * saved trap stack, then handle as at tl0.
 */
void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_insn_access_exception(regs, addr, type_ctx);
}
205
/* Spitfire data access exception (trap type 0x30).  Kernel faults
 * from registered uaccess sites are redirected to their exception
 * table fixup; other kernel faults are fatal.  User faults get
 * SIGSEGV/SEGV_MAPERR at the fault address.
 */
void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			/* Resume execution at the fixup stub. */
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		/* Not a uaccess fixup site: kernel fault is fatal. */
		printk("spitfire_data_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
243
/* Spitfire data access exception at trap level > 0: dump the saved
 * trap stack, then handle as at tl0.
 */
void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_data_access_exception(regs, sfsr, sfar);
}
253
/* sun4v (hypervisor) data access exception.
 *
 * @addr:     faulting address reported by the hypervisor
 * @type_ctx: fault type in the high 16 bits, MMU context in the
 *            low 16 bits
 *
 * NOTE(review): notify_die() is passed trap type 0x8 here — the same
 * value the insn-access handlers use — while the spitfire data
 * handler passes 0x30.  Confirm this is intentional.
 */
void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Kernel-mode data fault: log and die. */
		printk("sun4v_data_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Dax", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		/* Compat task: program counters are 32-bit. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
282
/* sun4v data access exception at trap level > 0: dump the saved trap
 * stack, then handle as at tl0.
 */
void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_data_access_exception(regs, addr, type_ctx);
}
292
293 #ifdef CONFIG_PCI
294 /* This is really pathetic... */
295 extern volatile int pci_poke_in_progress;
296 extern volatile int pci_poke_cpu;
297 extern volatile int pci_poke_faulted;
298 #endif
299
300 /* When access exceptions happen, we must do this. */
/* Zero out all I-/D-cache tags via the diagnostic put accessors, then
 * re-enable caches and MMUs through the LSU control register.  Only
 * valid on spitfire-class chips.
 */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em: clear every tag over two pages of diagnostic
	 * address space, 32 bytes per step.
	 */
	for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable in LSU (IC/DC/IM/DM control bits). */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			       "i" (ASI_LSU_CONTROL)
			     : "memory");
}
325
/* Re-arm all E-state error reporting by writing ESTATE_ERR_ALL to the
 * E-state error-enable register.
 */
static void spitfire_enable_estate_errors(void)
{
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (ESTATE_ERR_ALL),
			       "i" (ASI_ESTATE_ERROR_EN));
}
334
/* Translation from an 8-bit UDB ECC syndrome to the syndrome code
 * passed to prom_getunumber() — indexed with (udb & 0xff) in
 * spitfire_log_udb_syndrome() below.
 */
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};

/* Fallback module name when the PROM cannot resolve the address. */
static char *syndrome_unknown = "<Unknown>";
371
/* Decode and log the ECC syndrome from the low and high UDB error
 * register values.  @bit selects the error class to report (e.g.
 * UDBE_CE or UDBE_UE); each half is reported only if that bit is set.
 */
static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
{
	unsigned short scode;
	char memmod_str[64], *p;

	if (udbl & bit) {
		scode = ecc_syndrome_table[udbl & 0xff];
		/* Ask the PROM which memory module owns this address. */
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}

	if (udbh & bit) {
		scode = ecc_syndrome_table[udbh & 0xff];
		/* Same lookup for the high half. */
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}

}
402
/* Log a correctable ECC error (CEE) and re-arm E-state error
 * reporting.  Correctable errors are never fatal.
 */
static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{

	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
		   0, TRAP_TYPE_CEE, SIGTRAP);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}
423
424 static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
425 {
426 siginfo_t info;
427
428 printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
429 "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
430 smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
431
432 /* XXX add more human friendly logging of the error status
433 * XXX as is implemented for cheetah
434 */
435
436 spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
437
438 /* We always log it, even if someone is listening for this
439 * trap.
440 */
441 notify_die(DIE_TRAP, "Uncorrectable Error", regs,
442 0, tt, SIGTRAP);
443
444 if (regs->tstate & TSTATE_PRIV) {
445 if (tl1)
446 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
447 die_if_kernel("UE", regs);
448 }
449
450 /* XXX need more intelligent processing here, such as is implemented
451 * XXX for cheetah errors, in fact if the E-cache still holds the
452 * XXX line with bad parity this will loop
453 */
454
455 spitfire_clean_and_reenable_l1_caches();
456 spitfire_enable_estate_errors();
457
458 if (test_thread_flag(TIF_32BIT)) {
459 regs->tpc &= 0xffffffff;
460 regs->tnpc &= 0xffffffff;
461 }
462 info.si_signo = SIGBUS;
463 info.si_errno = 0;
464 info.si_code = BUS_OBJERR;
465 info.si_addr = (void *)0;
466 info.si_trapno = 0;
467 force_sig_info(SIGBUS, &info, current);
468 }
469
/* Top-level spitfire access-error handler.  @status_encoded packs the
 * AFSR, trap type, UDB high/low error registers and a tl>1 flag (see
 * the SFSTAT_* masks); decode it and dispatch to the UE and/or CEE
 * loggers.
 */
void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
	/* A PCI config-space poke in progress on this cpu may fault
	 * legitimately: flag the fault for the poker and skip the
	 * faulting instruction instead of logging.
	 */
	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			if (udbh & UDBE_CE) {
				/* ACK the CE bit in the high UDB. */
				__asm__ __volatile__(
					"stxa %0, [%1] %2\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			if (udbl & UDBE_CE) {
				/* ACK the CE bit in the low UDB
				 * (register offset 0x18).
				 */
				__asm__ __volatile__(
					"stxa %0, [%1] %2\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}
522
/* Non-zero when the P-cache has been forced on; set elsewhere (not
 * visible in this file) — presumably boot/setup code.
 */
int cheetah_pcache_forced_on;

/* Enable the cheetah prefetch cache by setting the PE/HPE/SPE/SL bits
 * in the DCU control register of the calling cpu.
 */
void cheetah_enable_pcache(void)
{
	unsigned long dcr;

	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
	       smp_processor_id());

	/* Read-modify-write the DCU control register. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (dcr)
			     : "i" (ASI_DCU_CONTROL_REG));
	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}
541
/* Cheetah error trap handling. */
static unsigned long ecache_flush_physbase;	/* phys base of the flush span */
static unsigned long ecache_flush_linesize;	/* smallest E-cache line size seen */
static unsigned long ecache_flush_size;		/* 2 * largest E-cache size seen */
546
547 /* This table is ordered in priority of errors and matches the
548 * AFAR overwrite policy as well.
549 */
550
/* One AFSR error bit and its description; tables below are
 * terminated by a zero mask entry.
 */
struct afsr_error_table {
	unsigned long mask;	/* AFSR error bit this entry matches */
	const char *name;	/* human-readable description */
};
555
556 static const char CHAFSR_PERR_msg[] =
557 "System interface protocol error";
558 static const char CHAFSR_IERR_msg[] =
559 "Internal processor error";
560 static const char CHAFSR_ISAP_msg[] =
561 "System request parity error on incoming addresss";
562 static const char CHAFSR_UCU_msg[] =
563 "Uncorrectable E-cache ECC error for ifetch/data";
564 static const char CHAFSR_UCC_msg[] =
565 "SW Correctable E-cache ECC error for ifetch/data";
566 static const char CHAFSR_UE_msg[] =
567 "Uncorrectable system bus data ECC error for read";
568 static const char CHAFSR_EDU_msg[] =
569 "Uncorrectable E-cache ECC error for stmerge/blkld";
570 static const char CHAFSR_EMU_msg[] =
571 "Uncorrectable system bus MTAG error";
572 static const char CHAFSR_WDU_msg[] =
573 "Uncorrectable E-cache ECC error for writeback";
574 static const char CHAFSR_CPU_msg[] =
575 "Uncorrectable ECC error for copyout";
576 static const char CHAFSR_CE_msg[] =
577 "HW corrected system bus data ECC error for read";
578 static const char CHAFSR_EDC_msg[] =
579 "HW corrected E-cache ECC error for stmerge/blkld";
580 static const char CHAFSR_EMC_msg[] =
581 "HW corrected system bus MTAG ECC error";
582 static const char CHAFSR_WDC_msg[] =
583 "HW corrected E-cache ECC error for writeback";
584 static const char CHAFSR_CPC_msg[] =
585 "HW corrected ECC error for copyout";
586 static const char CHAFSR_TO_msg[] =
587 "Unmapped error from system bus";
588 static const char CHAFSR_BERR_msg[] =
589 "Bus error response from system bus";
590 static const char CHAFSR_IVC_msg[] =
591 "HW corrected system bus data ECC error for ivec read";
592 static const char CHAFSR_IVU_msg[] =
593 "Uncorrectable system bus data ECC error for ivec read";
594 static struct afsr_error_table __cheetah_error_table[] = {
595 { CHAFSR_PERR, CHAFSR_PERR_msg },
596 { CHAFSR_IERR, CHAFSR_IERR_msg },
597 { CHAFSR_ISAP, CHAFSR_ISAP_msg },
598 { CHAFSR_UCU, CHAFSR_UCU_msg },
599 { CHAFSR_UCC, CHAFSR_UCC_msg },
600 { CHAFSR_UE, CHAFSR_UE_msg },
601 { CHAFSR_EDU, CHAFSR_EDU_msg },
602 { CHAFSR_EMU, CHAFSR_EMU_msg },
603 { CHAFSR_WDU, CHAFSR_WDU_msg },
604 { CHAFSR_CPU, CHAFSR_CPU_msg },
605 { CHAFSR_CE, CHAFSR_CE_msg },
606 { CHAFSR_EDC, CHAFSR_EDC_msg },
607 { CHAFSR_EMC, CHAFSR_EMC_msg },
608 { CHAFSR_WDC, CHAFSR_WDC_msg },
609 { CHAFSR_CPC, CHAFSR_CPC_msg },
610 { CHAFSR_TO, CHAFSR_TO_msg },
611 { CHAFSR_BERR, CHAFSR_BERR_msg },
612 /* These two do not update the AFAR. */
613 { CHAFSR_IVC, CHAFSR_IVC_msg },
614 { CHAFSR_IVU, CHAFSR_IVU_msg },
615 { 0, NULL },
616 };
/* Cheetah+ AFSR error descriptions: the base cheetah set plus the
 * CHPAFSR_* prefetch/storequeue and E-cache tag ECC errors.
 */
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{ CHAFSR_PERR, CHAFSR_PERR_msg },
	{ CHAFSR_IERR, CHAFSR_IERR_msg },
	{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
	{ CHAFSR_UCU, CHAFSR_UCU_msg },
	{ CHAFSR_UCC, CHAFSR_UCC_msg },
	{ CHAFSR_UE, CHAFSR_UE_msg },
	{ CHAFSR_EDU, CHAFSR_EDU_msg },
	{ CHAFSR_EMU, CHAFSR_EMU_msg },
	{ CHAFSR_WDU, CHAFSR_WDU_msg },
	{ CHAFSR_CPU, CHAFSR_CPU_msg },
	{ CHAFSR_CE, CHAFSR_CE_msg },
	{ CHAFSR_EDC, CHAFSR_EDC_msg },
	{ CHAFSR_EMC, CHAFSR_EMC_msg },
	{ CHAFSR_WDC, CHAFSR_WDC_msg },
	{ CHAFSR_CPC, CHAFSR_CPC_msg },
	{ CHAFSR_TO, CHAFSR_TO_msg },
	{ CHAFSR_BERR, CHAFSR_BERR_msg },
	{ CHPAFSR_DTO, CHPAFSR_DTO_msg },
	{ CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
	{ CHPAFSR_THCE, CHPAFSR_THCE_msg },
	{ CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
	{ CHPAFSR_TUE, CHPAFSR_TUE_msg },
	{ CHPAFSR_DUE, CHPAFSR_DUE_msg },
	/* These two do not update the AFAR. */
	{ CHAFSR_IVC, CHAFSR_IVC_msg },
	{ CHAFSR_IVU, CHAFSR_IVU_msg },
	{ 0, NULL },
};
/* Jalapeno/Serrano AFSR error descriptions: JBUS-specific JPAFSR_*
 * errors interleaved with the applicable base cheetah ones.
 */
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
static struct afsr_error_table __jalapeno_error_table[] = {
	{ JPAFSR_JETO, JPAFSR_JETO_msg },
	{ JPAFSR_SCE, JPAFSR_SCE_msg },
	{ JPAFSR_JEIC, JPAFSR_JEIC_msg },
	{ JPAFSR_JEIT, JPAFSR_JEIT_msg },
	{ CHAFSR_PERR, CHAFSR_PERR_msg },
	{ CHAFSR_IERR, CHAFSR_IERR_msg },
	{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
	{ CHAFSR_UCU, CHAFSR_UCU_msg },
	{ CHAFSR_UCC, CHAFSR_UCC_msg },
	{ CHAFSR_UE, CHAFSR_UE_msg },
	{ CHAFSR_EDU, CHAFSR_EDU_msg },
	{ JPAFSR_OM, JPAFSR_OM_msg },
	{ CHAFSR_WDU, CHAFSR_WDU_msg },
	{ CHAFSR_CPU, CHAFSR_CPU_msg },
	{ CHAFSR_CE, CHAFSR_CE_msg },
	{ CHAFSR_EDC, CHAFSR_EDC_msg },
	{ JPAFSR_ETP, JPAFSR_ETP_msg },
	{ CHAFSR_WDC, CHAFSR_WDC_msg },
	{ CHAFSR_CPC, CHAFSR_CPC_msg },
	{ CHAFSR_TO, CHAFSR_TO_msg },
	{ CHAFSR_BERR, CHAFSR_BERR_msg },
	{ JPAFSR_UMS, JPAFSR_UMS_msg },
	{ JPAFSR_RUE, JPAFSR_RUE_msg },
	{ JPAFSR_RCE, JPAFSR_RCE_msg },
	{ JPAFSR_BP, JPAFSR_BP_msg },
	{ JPAFSR_WBP, JPAFSR_WBP_msg },
	{ JPAFSR_FRC, JPAFSR_FRC_msg },
	{ JPAFSR_FRU, JPAFSR_FRU_msg },
	/* This one does not update the AFAR. */
	{ CHAFSR_IVU, CHAFSR_IVU_msg },
	{ 0, NULL },
};
/* Selected at boot in cheetah_ecache_flush_init() from the cpu VER
 * register: the active decode table and union of its AFSR error bits.
 */
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;

/* Error-log scoreboard: 2 entries (tl0/tl>0) per cpu, allocated and
 * initialized in cheetah_ecache_flush_init().
 */
struct cheetah_err_info *cheetah_error_log;
721
722 static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
723 {
724 struct cheetah_err_info *p;
725 int cpu = smp_processor_id();
726
727 if (!cheetah_error_log)
728 return NULL;
729
730 p = cheetah_error_log + (cpu * 2);
731 if ((afsr & CHAFSR_TL1) != 0UL)
732 p++;
733
734 return p;
735 }
736
737 extern unsigned int tl0_icpe[], tl1_icpe[];
738 extern unsigned int tl0_dcpe[], tl1_dcpe[];
739 extern unsigned int tl0_fecc[], tl1_fecc[];
740 extern unsigned int tl0_cee[], tl1_cee[];
741 extern unsigned int tl0_iae[], tl1_iae[];
742 extern unsigned int tl0_dae[], tl1_dae[];
743 extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
744 extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
745 extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
746 extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
747 extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
748
749 void __init cheetah_ecache_flush_init(void)
750 {
751 unsigned long largest_size, smallest_linesize, order, ver;
752 int i, sz;
753
754 /* Scan all cpu device tree nodes, note two values:
755 * 1) largest E-cache size
756 * 2) smallest E-cache line size
757 */
758 largest_size = 0UL;
759 smallest_linesize = ~0UL;
760
761 for (i = 0; i < NR_CPUS; i++) {
762 unsigned long val;
763
764 val = cpu_data(i).ecache_size;
765 if (!val)
766 continue;
767
768 if (val > largest_size)
769 largest_size = val;
770
771 val = cpu_data(i).ecache_line_size;
772 if (val < smallest_linesize)
773 smallest_linesize = val;
774
775 }
776
777 if (largest_size == 0UL || smallest_linesize == ~0UL) {
778 prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
779 "parameters.\n");
780 prom_halt();
781 }
782
783 ecache_flush_size = (2 * largest_size);
784 ecache_flush_linesize = smallest_linesize;
785
786 ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
787
788 if (ecache_flush_physbase == ~0UL) {
789 prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
790 "contiguous physical memory.\n",
791 ecache_flush_size);
792 prom_halt();
793 }
794
795 /* Now allocate error trap reporting scoreboard. */
796 sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
797 for (order = 0; order < MAX_ORDER; order++) {
798 if ((PAGE_SIZE << order) >= sz)
799 break;
800 }
801 cheetah_error_log = (struct cheetah_err_info *)
802 __get_free_pages(GFP_KERNEL, order);
803 if (!cheetah_error_log) {
804 prom_printf("cheetah_ecache_flush_init: Failed to allocate "
805 "error logging scoreboard (%d bytes).\n", sz);
806 prom_halt();
807 }
808 memset(cheetah_error_log, 0, PAGE_SIZE << order);
809
810 /* Mark all AFSRs as invalid so that the trap handler will
811 * log new new information there.
812 */
813 for (i = 0; i < 2 * NR_CPUS; i++)
814 cheetah_error_log[i].afsr = CHAFSR_INVALID;
815
816 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
817 if ((ver >> 32) == __JALAPENO_ID ||
818 (ver >> 32) == __SERRANO_ID) {
819 cheetah_error_table = &__jalapeno_error_table[0];
820 cheetah_afsr_errors = JPAFSR_ERRORS;
821 } else if ((ver >> 32) == 0x003e0015) {
822 cheetah_error_table = &__cheetah_plus_error_table[0];
823 cheetah_afsr_errors = CHPAFSR_ERRORS;
824 } else {
825 cheetah_error_table = &__cheetah_error_table[0];
826 cheetah_afsr_errors = CHAFSR_ERRORS;
827 }
828
829 /* Now patch trap tables. */
830 memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
831 memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
832 memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
833 memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
834 memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
835 memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
836 memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
837 memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
838 if (tlb_type == cheetah_plus) {
839 memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
840 memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
841 memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
842 memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
843 }
844 flushi(PAGE_OFFSET);
845 }
846
/* Displacement-flush the entire E-cache by reading the flush span one
 * line at a time with ASI_PHYS_USE_EC until the count reaches zero.
 */
static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	__asm__ __volatile__("1: subcc %0, %4, %0\n\t"
			     " bne,pt %%xcc, 1b\n\t"
			     " ldxa [%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}
860
/* Displacement-flush one E-cache line: read the address's image in
 * the flush span and its alias half the span away.
 */
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	/* 8-byte align, then fold the address into the flush span. */
	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}
876
877 /* Unfortunately, the diagnostic access to the I-cache tags we need to
878 * use to clear the thing interferes with I-cache coherency transactions.
879 *
880 * So we must only flush the I-cache when it is disabled.
881 */
/* Invalidate the whole I-cache via diagnostic tag writes.  Per the
 * comment above, the caller must have the I-cache disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned int icache_size, icache_line_size;
	unsigned long addr;

	icache_size = local_cpu_data().icache_size;
	icache_line_size = local_cpu_data().icache_line_size;

	/* Clear the valid bits in all the tags. */
	for (addr = 0; addr < icache_size; addr += icache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr | (2 << 3)),
				       "i" (ASI_IC_TAG));
	}
}
899
/* Safely flush the I-cache: disable it via the DCU control register,
 * invalidate all tags, then restore the saved DCU value.
 */
static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}
921
/* Invalidate the whole D-cache via diagnostic tag writes. */
static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}
937
938 /* In order to make the even parity correct we must do two things.
939 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
940 * Next, we clear out all 32-bytes of data for that line. Data of
941 * all-zero + tag parity value of zero == correct parity.
942 */
/* Rewrite every D-cache line to a known-good parity state; see the
 * comment above for why the utag + all-zero data combination works.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		/* utag value derived from the line address. */
		unsigned long tag = (addr >> 14);
		unsigned long line;

		/* Set the utag for this line... */
		__asm__ __volatile__("membar #Sync\n\t"
				     "stxa %0, [%1] %2\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (addr),
				       "i" (ASI_DCACHE_UTAG));
		/* ...then zero all 8-byte words of data in the line. */
		for (line = addr; line < addr + dcache_line_size; line += 8)
			__asm__ __volatile__("membar #Sync\n\t"
					     "stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (line),
					       "i" (ASI_DCACHE_DATA));
	}
}
970
971 /* Conversion tables used to frob Cheetah AFSR syndrome values into
972 * something palatable to the memory controller driver get_unumber
973 * routine.
974 */
/* Out-of-band codes (>= 128) used in cheetah_ecc_syntab below for
 * syndromes that do not name a single data bit.  NOTE(review):
 * meanings inferred from the names (C0-C8 check bits, MTx/MTCx MTAG
 * bits, M/M2/M3/M4 multi-bit, NONE no error) — confirm against the
 * memory controller driver's get_unumber routine.
 */
#define MT0 137
#define MT1 138
#define MT2 139
#define NONE 254
#define MTC0 140
#define MTC1 141
#define MTC2 142
#define MTC3 143
#define C0 128
#define C1 129
#define C2 130
#define C3 131
#define C4 132
#define C5 133
#define C6 134
#define C7 135
#define C8 136
#define M2 144
#define M3 145
#define M4 146
#define M 147
996 static unsigned char cheetah_ecc_syntab[] = {
997 /*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
998 /*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
999 /*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
1000 /*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
1001 /*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
1002 /*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
1003 /*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
1004 /*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
1005 /*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
1006 /*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
1007 /*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
1008 /*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
1009 /*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
1010 /*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
1011 /*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
1012 /*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
1013 /*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
1014 /*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
1015 /*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
1016 /*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
1017 /*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
1018 /*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
1019 /*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
1020 /*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
1021 /*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
1022 /*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
1023 /*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
1024 /*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
1025 /*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
1026 /*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
1027 /*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
1028 /*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
1029 };
1030 static unsigned char cheetah_mtag_syntab[] = {
1031 NONE, MTC0,
1032 MTC1, NONE,
1033 MTC2, NONE,
1034 NONE, MT0,
1035 MTC3, NONE,
1036 NONE, MT1,
1037 NONE, MT2,
1038 NONE, NONE
1039 };
1040
1041 /* Return the highest priority error conditon mentioned. */
1042 static inline unsigned long cheetah_get_hipri(unsigned long afsr)
1043 {
1044 unsigned long tmp = 0;
1045 int i;
1046
1047 for (i = 0; cheetah_error_table[i].mask; i++) {
1048 if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
1049 return tmp;
1050 }
1051 return tmp;
1052 }
1053
1054 static const char *cheetah_get_string(unsigned long bit)
1055 {
1056 int i;
1057
1058 for (i = 0; cheetah_error_table[i].mask; i++) {
1059 if ((bit & cheetah_error_table[i].mask) != 0UL)
1060 return cheetah_error_table[i].name;
1061 }
1062 return "???";
1063 }
1064
1065 extern int chmc_getunumber(int, unsigned long, char *, int);
1066
/* Pretty-print a Cheetah error trap: the raw AFSR/AFAR, the trap PC
 * state, decoded syndromes (with memory module unumber lookup when
 * possible), the cache snapshot captured by the trap handler, and any
 * additional error bits beyond the highest-priority one.
 *
 * All output uses KERN_WARNING when @recoverable, KERN_CRIT otherwise.
 */
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
	printk("%s" "ERROR(%d): ",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
	printk("TPC<%pS>\n", (void *) regs->tpc);
	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU | \
			 CHAFSR_UCC | CHAFSR_UCU | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		/* Data/check-bit syndrome: translate through the ECC
		 * table, then ask the memory controller driver for the
		 * module unumber at AFAR.
		 */
		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		/* Mtag syndrome: same lookup, via the mtag table. */
		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
	       "u[%016lx] l[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	/* Report every remaining error bit beyond the highest-priority
	 * one, peeling them off one at a time.
	 */
	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}
1184
/* Re-read the AFSR to see whether any new error bits latched while
 * error reporting was disabled.  If so and @logp is non-NULL, record
 * the fresh AFSR/AFAR there.  Always write the AFSR value back (a
 * write-one-to-clear operation) so the latched bits are acknowledged.
 *
 * Returns 1 if new error bits were found, 0 otherwise.
 */
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			/* Only fetch AFAR when we actually have
			 * somewhere to log it.
			 */
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	/* Clear the latched bits by writing them back to the AFSR. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}
1209
/* Handle a Cheetah Fast-ECC error trap.  The trap handler has already
 * captured the error state into the per-cpu log and disabled the
 * I/D caches and error reporting; we flush caches, re-enable them,
 * decide recoverability, log, and panic when irrecoverable.
 */
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Error before logging is set up: all we can do is
		 * report via the PROM and halt.
		 */
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}
1295
/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 *
 * Returns 0 if the error did not recur (intermittent), 1 if it
 * recurred once but a second displacement cleared it, 2 if it
 * persisted even after the retry.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
			     "andn %0, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %2\n\t"
			     "membar #Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);	/* 8-byte align for the casxa below */
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "casxa [%2] %3, %%g0, %%g0\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}
1360
1361 /* Return non-zero if PADDR is a valid physical memory address. */
1362 static int cheetah_check_main_memory(unsigned long paddr)
1363 {
1364 unsigned long vaddr = PAGE_OFFSET + paddr;
1365
1366 if (vaddr > (unsigned long) high_memory)
1367 return 0;
1368
1369 return kern_addr_valid(vaddr);
1370 }
1371
/* Handle a Cheetah correctable-ECC error trap.  For main-memory CE
 * errors we attempt a fix by displacement-flushing the E-cache line;
 * then we flush whatever caches the error touched, re-enable the
 * I-cache and CE reporting, log, and panic only when irrecoverable.
 */
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Error before logging is set up: report via PROM and halt. */
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		/* Flush only the affected E-cache line when the data
		 * parity error is the sole error present; otherwise
		 * flush the whole E-cache.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				     "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}
1471
/* Handle a Cheetah deferred error trap.  Deferred errors are
 * asynchronous with respect to the faulting instruction, so recovery
 * is only possible when the AFAR points at main memory and the trap
 * came either from user mode or from a kernel access covered by an
 * exception table entry.
 */
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		/* Acknowledge the expected error from the poke and
		 * step past the faulting instruction.
		 */
		(void) cheetah_recheck_errors(NULL);

		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Error before logging is set up: report via PROM and halt. */
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		/* Flush only the affected E-cache line when this is
		 * the sole error bit present; otherwise flush all.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				     "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			     "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			const struct exception_table_entry *entry;

			entry = search_exception_tables(regs->tpc);
			if (entry) {
				/* OK, kernel access to userspace. */
				recoverable = 1;

			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				/* Pin the bad page (leaking it forever)
				 * so nothing else ever uses it again.
				 */
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = entry->fixup;
					regs->tnpc = regs->tpc + 4;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}
1658
/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 * Bit0: 0=dcache,1=icache
 * Bit1: 0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	/* Scrub whichever cache took the parity error; the D-cache is
	 * always flushed afterwards since parity was rebuilt from
	 * scratch there.
	 */
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Unrecoverable flavor: report and die. */
	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
	printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
}
1700
/* Layout of one entry in the sun4v hypervisor error queues
 * (resumable and non-resumable).  Clearing err_handle releases the
 * entry back to the hypervisor.
 */
struct sun4v_error_entry {
	u64		err_handle;	/* opaque handle; 0 == entry free */
	u64		err_stick;	/* %stick timestamp of the error */

	u32		err_type;	/* one of SUN4V_ERR_TYPE_* below */
#define SUN4V_ERR_TYPE_UNDEFINED	0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
#define SUN4V_ERR_TYPE_WARNING_RES	4

	u32		err_attrs;	/* bitmask of SUN4V_ERR_ATTRS_* */
#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
#define SUN4V_ERR_ATTRS_PIO		0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
#define SUN4V_ERR_ATTRS_USER_MODE	0x01000000
#define SUN4V_ERR_ATTRS_PRIV_MODE	0x02000000
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000

	u64		err_raddr;	/* real address of the error */
	u32		err_size;	/* size of the affected region */
	u16		err_cpu;	/* cpu the error was detected on */
	u16		err_pad;	/* padding to 8-byte multiple */
};
1727
/* Counts of hypervisor error-queue overflow events; bumped from the
 * overflow trap handlers and reported (then reset) by sun4v_log_error().
 */
static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
1730
1731 static const char *sun4v_err_type_to_str(u32 type)
1732 {
1733 switch (type) {
1734 case SUN4V_ERR_TYPE_UNDEFINED:
1735 return "undefined";
1736 case SUN4V_ERR_TYPE_UNCORRECTED_RES:
1737 return "uncorrected resumable";
1738 case SUN4V_ERR_TYPE_PRECISE_NONRES:
1739 return "precise nonresumable";
1740 case SUN4V_ERR_TYPE_DEFERRED_NONRES:
1741 return "deferred nonresumable";
1742 case SUN4V_ERR_TYPE_WARNING_RES:
1743 return "warning resumable";
1744 default:
1745 return "unknown";
1746 };
1747 }
1748
/* Pretty-print one sun4v error queue entry @ent (already copied out of
 * the queue) for cpu @cpu with log prefix @pfx, dump the trapping
 * registers, and report/reset the queue-overflow counter @ocnt.
 */
static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
{
	int cnt;

	printk("%s: Reporting on cpu %d\n", pfx, cpu);
	printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
	       pfx,
	       ent->err_handle, ent->err_stick,
	       ent->err_type,
	       sun4v_err_type_to_str(ent->err_type));
	printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
	       pfx,
	       ent->err_attrs,
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
		"processor" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
		"memory" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
		"pio" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
		"integer-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
		"fpu-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
		"user" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
		"privileged" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
		"queue-full" : ""));
	printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
	       pfx,
	       ent->err_raddr, ent->err_size, ent->err_cpu);

	show_regs(regs);

	/* Report and reset the overflow count accumulated by the
	 * overflow trap handlers (which cannot safely printk).
	 */
	if ((cnt = atomic_read(ocnt)) != 0) {
		atomic_set(ocnt, 0);
		wmb();
		printk("%s: Queue overflowed %d times.\n",
		       pfx, cnt);
	}
}
1791
1792 /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1793 * Log the event and clear the first word of the entry.
1794 */
1795 void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
1796 {
1797 struct sun4v_error_entry *ent, local_copy;
1798 struct trap_per_cpu *tb;
1799 unsigned long paddr;
1800 int cpu;
1801
1802 cpu = get_cpu();
1803
1804 tb = &trap_block[cpu];
1805 paddr = tb->resum_kernel_buf_pa + offset;
1806 ent = __va(paddr);
1807
1808 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1809
1810 /* We have a local copy now, so release the entry. */
1811 ent->err_handle = 0;
1812 wmb();
1813
1814 put_cpu();
1815
1816 if (ent->err_type == SUN4V_ERR_TYPE_WARNING_RES) {
1817 /* If err_type is 0x4, it's a powerdown request. Do
1818 * not do the usual resumable error log because that
1819 * makes it look like some abnormal error.
1820 */
1821 printk(KERN_INFO "Power down request...\n");
1822 kill_cad_pid(SIGINT, 1);
1823 return;
1824 }
1825
1826 sun4v_log_error(regs, &local_copy, cpu,
1827 KERN_ERR "RESUMABLE ERROR",
1828 &sun4v_resum_oflow_cnt);
1829 }
1830
/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_resum_overflow(struct pt_regs *regs)
{
	/* Reported and reset later by sun4v_log_error(). */
	atomic_inc(&sun4v_resum_oflow_cnt);
}
1839
/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
 * Log the event, clear the first word of the entry, and die.
 *
 * @offset: byte offset of the error entry within this cpu's
 *          non-resumable error queue kernel buffer.
 */
void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	paddr = tb->nonresum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry. */
	ent->err_handle = 0;
	wmb();

	put_cpu();

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence.  An expected fault
	 * from a PCI config/IO poke is acknowledged by skipping the
	 * faulting instruction rather than treated as fatal.
	 */
	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	sun4v_log_error(regs, &local_copy, cpu,
			KERN_EMERG "NON-RESUMABLE ERROR",
			&sun4v_nonresum_oflow_cnt);

	panic("Non-resumable error.");
}
1880
/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_nonresum_overflow(struct pt_regs *regs)
{
	/* XXX Actually even this can make not that much sense.  Perhaps
	 * XXX we should just pull the plug and panic directly from here?
	 */
	atomic_inc(&sun4v_nonresum_oflow_cnt);
}
1892
/* ITLB miss error state stashed by the sun4v trap entry code before
 * calling sun4v_itlb_error_report().
 */
unsigned long sun4v_err_itlb_vaddr;
unsigned long sun4v_err_itlb_ctx;
unsigned long sun4v_err_itlb_pte;
unsigned long sun4v_err_itlb_error;

/* Report a fatal sun4v ITLB operation error and halt via the PROM.
 * @tl is the trap level at which the error occurred; when it was at
 * tl > 1 the saved trap stack is dumped too.
 */
void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
{
	if (tl > 1)
		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
	       sun4v_err_itlb_pte, sun4v_err_itlb_error);

	prom_halt();
}
1916
/* DTLB miss error state stashed by the sun4v trap entry code before
 * calling sun4v_dtlb_error_report().
 */
unsigned long sun4v_err_dtlb_vaddr;
unsigned long sun4v_err_dtlb_ctx;
unsigned long sun4v_err_dtlb_pte;
unsigned long sun4v_err_dtlb_error;

/* Report a fatal sun4v DTLB operation error and halt via the PROM.
 * @tl is the trap level at which the error occurred; when it was at
 * tl > 1 the saved trap stack is dumped too.
 */
void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
{
	if (tl > 1)
		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
	       sun4v_err_dtlb_pte, sun4v_err_dtlb_error);

	prom_halt();
}
1940
/* Log a failed hypervisor TLB operation; @err is the hv error code
 * returned and @op identifies the attempted operation.
 */
void hypervisor_tlbop_error(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
	       err, op);
}
1946
/* Same as hypervisor_tlbop_error() but for TLB ops issued via
 * cross-call to another cpu.
 */
void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
	       err, op);
}
1952
/* Common floating point exception handling: kernel-mode traps simply
 * skip the faulting instruction; user-mode traps get a SIGFPE whose
 * si_code is decoded from the saved %fsr.
 */
void do_fpe_common(struct pt_regs *regs)
{
	if (regs->tstate & TSTATE_PRIV) {
		/* Kernel FPU trap: step past the faulting instruction. */
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	} else {
		unsigned long fsr = current_thread_info()->xfsr[0];
		siginfo_t info;

		if (test_thread_flag(TIF_32BIT)) {
			/* 32-bit task: PCs wrap at 4GB. */
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *)regs->tpc;
		info.si_trapno = 0;
		info.si_code = __SI_FAULT;
		/* ftt field == 1 (IEEE_754_exception): map the cexc
		 * bits, highest priority first, to a si_code.
		 */
		if ((fsr & 0x1c000) == (1 << 14)) {
			if (fsr & 0x10)
				info.si_code = FPE_FLTINV;
			else if (fsr & 0x08)
				info.si_code = FPE_FLTOVF;
			else if (fsr & 0x04)
				info.si_code = FPE_FLTUND;
			else if (fsr & 0x02)
				info.si_code = FPE_FLTDIV;
			else if (fsr & 0x01)
				info.si_code = FPE_FLTRES;
		}
		force_sig_info(SIGFPE, &info, current);
	}
}
1986
1987 void do_fpieee(struct pt_regs *regs)
1988 {
1989 if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
1990 0, 0x24, SIGFPE) == NOTIFY_STOP)
1991 return;
1992
1993 do_fpe_common(regs);
1994 }
1995
1996 extern int do_mathemu(struct pt_regs *, struct fpustate *);
1997
1998 void do_fpother(struct pt_regs *regs)
1999 {
2000 struct fpustate *f = FPUSTATE;
2001 int ret = 0;
2002
2003 if (notify_die(DIE_TRAP, "fpu exception other", regs,
2004 0, 0x25, SIGFPE) == NOTIFY_STOP)
2005 return;
2006
2007 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
2008 case (2 << 14): /* unfinished_FPop */
2009 case (3 << 14): /* unimplemented_FPop */
2010 ret = do_mathemu(regs, f);
2011 break;
2012 }
2013 if (ret)
2014 return;
2015 do_fpe_common(regs);
2016 }
2017
/* Tagged-arithmetic-overflow trap (tt 0x26): deliver SIGEMT with
 * EMT_TAGOVF to the user task; this should never happen in kernel
 * mode, so oops if it does.
 */
void do_tof(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
		       0, 0x26, SIGEMT) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("Penguin overflow trap from kernel mode", regs);
	if (test_thread_flag(TIF_32BIT)) {
		/* 32-bit tasks live in a 32-bit PC space. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGEMT;
	info.si_errno = 0;
	info.si_code = EMT_TAGOVF;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGEMT, &info, current);
}
2039
2040 void do_div0(struct pt_regs *regs)
2041 {
2042 siginfo_t info;
2043
2044 if (notify_die(DIE_TRAP, "integer division by zero", regs,
2045 0, 0x28, SIGFPE) == NOTIFY_STOP)
2046 return;
2047
2048 if (regs->tstate & TSTATE_PRIV)
2049 die_if_kernel("TL0: Kernel divide by zero.", regs);
2050 if (test_thread_flag(TIF_32BIT)) {
2051 regs->tpc &= 0xffffffff;
2052 regs->tnpc &= 0xffffffff;
2053 }
2054 info.si_signo = SIGFPE;
2055 info.si_errno = 0;
2056 info.si_code = FPE_INTDIV;
2057 info.si_addr = (void __user *)regs->tpc;
2058 info.si_trapno = 0;
2059 force_sig_info(SIGFPE, &info, current);
2060 }
2061
/* Dump the nine instruction words surrounding a kernel fault PC,
 * pc[-3..5], bracketing the faulting word (i == 0) as <...>.
 *
 * NOTE(review): dereferences pc-3 through pc+5 directly; assumes the
 * whole range is mapped kernel text -- confirm callers never pass a
 * PC adjacent to an unmapped page.
 */
static void instruction_dump(unsigned int *pc)
{
	int i;

	/* Instructions are 4-byte aligned; bail on a bogus PC. */
	if ((((unsigned long) pc) & 3))
		return;

	printk("Instruction DUMP:");
	for (i = -3; i < 6; i++)
		printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
	printk("\n");
}
2074
2075 static void user_instruction_dump(unsigned int __user *pc)
2076 {
2077 int i;
2078 unsigned int buf[9];
2079
2080 if ((((unsigned long) pc) & 3))
2081 return;
2082
2083 if (copy_from_user(buf, pc - 3, sizeof(buf)))
2084 return;
2085
2086 printk("Instruction DUMP:");
2087 for (i = 0; i < 9; i++)
2088 printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
2089 printk("\n");
2090 }
2091
2092 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2093 {
2094 unsigned long fp, thread_base, ksp;
2095 struct thread_info *tp;
2096 int count = 0;
2097
2098 ksp = (unsigned long) _ksp;
2099 if (!tsk)
2100 tsk = current;
2101 tp = task_thread_info(tsk);
2102 if (ksp == 0UL) {
2103 if (tsk == current)
2104 asm("mov %%fp, %0" : "=r" (ksp));
2105 else
2106 ksp = tp->ksp;
2107 }
2108 if (tp == current_thread_info())
2109 flushw_all();
2110
2111 fp = ksp + STACK_BIAS;
2112 thread_base = (unsigned long) tp;
2113
2114 printk("Call Trace:\n");
2115 do {
2116 struct sparc_stackf *sf;
2117 struct pt_regs *regs;
2118 unsigned long pc;
2119
2120 if (!kstack_valid(tp, fp))
2121 break;
2122 sf = (struct sparc_stackf *) fp;
2123 regs = (struct pt_regs *) (sf + 1);
2124
2125 if (kstack_is_trap_frame(tp, regs)) {
2126 if (!(regs->tstate & TSTATE_PRIV))
2127 break;
2128 pc = regs->tpc;
2129 fp = regs->u_regs[UREG_I6] + STACK_BIAS;
2130 } else {
2131 pc = sf->callers_pc;
2132 fp = (unsigned long)sf->fp + STACK_BIAS;
2133 }
2134
2135 printk(" [%016lx] %pS\n", pc, (void *) pc);
2136 } while (++count < 16);
2137 }
2138
/* Arch implementation of dump_stack(): backtrace the current task
 * from its live frame pointer.
 */
void dump_stack(void)
{
	show_stack(current, NULL);
}

EXPORT_SYMBOL(dump_stack);
2145
2146 static inline int is_kernel_stack(struct task_struct *task,
2147 struct reg_window *rw)
2148 {
2149 unsigned long rw_addr = (unsigned long) rw;
2150 unsigned long thread_base, thread_end;
2151
2152 if (rw_addr < PAGE_OFFSET) {
2153 if (task != &init_task)
2154 return 0;
2155 }
2156
2157 thread_base = (unsigned long) task_stack_page(task);
2158 thread_end = thread_base + sizeof(union thread_union);
2159 if (rw_addr >= thread_base &&
2160 rw_addr < thread_end &&
2161 !(rw_addr & 0x7UL))
2162 return 1;
2163
2164 return 0;
2165 }
2166
2167 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2168 {
2169 unsigned long fp = rw->ins[6];
2170
2171 if (!fp)
2172 return NULL;
2173
2174 return (struct reg_window *) (fp + STACK_BIAS);
2175 }
2176
/* Fatal trap path: print an oops banner, the registers, and either a
 * kernel backtrace (privileged regs) or a user instruction dump,
 * then terminate the task.  Despite the name it is also called for
 * fatal user-mode traps; it only uses SIGKILL when the trapped state
 * was privileged, SIGSEGV otherwise.  Never returns.
 */
void die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;	/* running oops count for the [#N] tag */
	int count = 0;

	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	__asm__ __volatile__("flushw");	/* spill register windows first */
	show_regs(regs);
	add_taint(TAINT_DIE);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		while (rw &&
		       count++ < 30&&
		       is_kernel_stack(current, rw)) {
			printk("Caller[%016lx]: %pS\n", rw->ins[7],
			       (void *) rw->ins[7]);

			rw = kernel_stack_up(rw);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			/* 32-bit tasks live in a 32-bit PC space. */
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int __user *) regs->tpc);
	}
	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
2221
2222 #define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19))
2223 #define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
2224
2225 extern int handle_popc(u32 insn, struct pt_regs *regs);
2226 extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
2227 extern int vis_emul(struct pt_regs *, unsigned int);
2228
/* Illegal-instruction trap (tt 0x10).
 *
 * Before signalling the task, try software emulation: POPC and
 * LDQ/STQ are always emulated; on sun4v (hypervisor) chips VIS
 * instructions and FP ops are emulated as well -- presumably because
 * those chips lack the hardware (verify against cpu docs).  Anything
 * unhandled gets SIGILL/ILL_ILLOPC.  Kernel-mode illegal
 * instructions oops immediately.
 */
void do_illegal_instruction(struct pt_regs *regs)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	siginfo_t info;

	if (notify_die(DIE_TRAP, "illegal instruction", regs,
		       0, 0x10, SIGILL) == NOTIFY_STOP)
		return;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("Kernel illegal instruction", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;	/* 32-bit tasks: truncate the PC */
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
			if (handle_popc(insn, regs))
				return;
		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
			if (handle_ldf_stq(insn, regs))
				return;
		} else if (tlb_type == hypervisor) {
			if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
				if (!vis_emul(regs, insn))
					return;
			} else {
				struct fpustate *f = FPUSTATE;

				/* XXX maybe verify XFSR bits like
				 * XXX do_fpother() does?
				 */
				if (do_mathemu(regs, f))
					return;
			}
		}
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *)pc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}
2273
2274 extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
2275
/* Memory-address-not-aligned trap (tt 0x34).
 * @sfar: fault address register, @sfsr: fault status register.
 *
 * Kernel-mode accesses are fixed up in software by
 * kernel_unaligned_trap(); user-mode accesses get SIGBUS with
 * BUS_ADRALN and the faulting address.
 */
void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
		return;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}
2295
/* sun4v variant of the unaligned-access trap.
 * @addr: faulting address; @type_ctx: access type/context word from
 * the hypervisor (currently unused here beyond the signature).
 *
 * Same policy as mem_address_unaligned(): kernel fixup via
 * kernel_unaligned_trap(), SIGBUS/BUS_ADRALN for user mode.
 */
void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
		return;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}
2315
/* Privileged-opcode trap (tt 0x11): user code executed a privileged
 * instruction.  Always delivers SIGILL with ILL_PRVOPC.
 */
void do_privop(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "privileged operation", regs,
		       0, 0x11, SIGILL) == NOTIFY_STOP)
		return;

	if (test_thread_flag(TIF_32BIT)) {
		/* 32-bit tasks live in a 32-bit PC space. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}
2335
/* Privileged-action trap: handled identically to a privileged
 * opcode trap.
 */
void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}
2340
/* Trap level 1 stuff or other traps we should never see... */

/* All handlers below are terminal: the _tl1 variants first dump the
 * trap log the entry code saved after pt_regs, then every one of
 * them oopses via die_if_kernel() with a message naming the trap.
 */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}

void do_cee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Cache Error Exception", regs);
}

void do_dae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Data Access Exception", regs);
}

void do_iae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Instruction Access Exception", regs);
}

void do_div0_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}

void do_fpdis_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Disabled", regs);
}

void do_fpieee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}

void do_fpother_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}

void do_ill_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}

void do_irq_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}

void do_lddfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}

void do_stdfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}

void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}

void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}

void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}

void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}

void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}
2440
2441 void do_getpsr(struct pt_regs *regs)
2442 {
2443 regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2444 regs->tpc = regs->tnpc;
2445 regs->tnpc += 4;
2446 if (test_thread_flag(TIF_32BIT)) {
2447 regs->tpc &= 0xffffffff;
2448 regs->tnpc &= 0xffffffff;
2449 }
2450 }
2451
/* Per-cpu trap state, indexed by cpu number.  The TRAP_PER_CPU_*
 * offset checks in trap_init() suggest this is also accessed from
 * the trap entry assembly via fixed offsets.
 */
struct trap_per_cpu trap_block[NR_CPUS];

/* This can get invoked before sched_init() so play it super safe
 * and use hard_smp_processor_id().
 */
void notrace init_cur_cpu_trap(struct thread_info *t)
{
	int cpu = hard_smp_processor_id();
	struct trap_per_cpu *p = &trap_block[cpu];

	p->thread = t;
	p->pgd_paddr = 0;
}
2465
2466 extern void thread_info_offsets_are_bolixed_dave(void);
2467 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
2468 extern void tsb_config_offsets_are_bolixed_dave(void);
2469
/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check: the TI_* constants are the
	 * offsets assembly code uses into struct thread_info; if they
	 * drift from the C layout, call a deliberately undefined
	 * function so the build fails at link time.  TI_FPREGS must
	 * also stay 64-byte aligned.
	 */
	if (TI_TASK != offsetof(struct thread_info, task) ||
	    TI_FLAGS != offsetof(struct thread_info, flags) ||
	    TI_CPU != offsetof(struct thread_info, cpu) ||
	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
	    TI_KSP != offsetof(struct thread_info, ksp) ||
	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
	    TI_KREGS != offsetof(struct thread_info, kregs) ||
	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
	    TI_GSR != offsetof(struct thread_info, gsr) ||
	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
	    TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
	    TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
	    TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
	    (TI_FPREGS & (64 - 1)))
		thread_info_offsets_are_bolixed_dave();

	/* Same check for the struct trap_per_cpu offsets. */
	if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
	    (TRAP_PER_CPU_PGD_PADDR !=
	     offsetof(struct trap_per_cpu, pgd_paddr)) ||
	    (TRAP_PER_CPU_CPU_MONDO_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
	    (TRAP_PER_CPU_DEV_MONDO_PA !=
	     offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_FAULT_INFO !=
	     offsetof(struct trap_per_cpu, fault_info)) ||
	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
	    (TRAP_PER_CPU_CPU_LIST_PA !=
	     offsetof(struct trap_per_cpu, cpu_list_pa)) ||
	    (TRAP_PER_CPU_TSB_HUGE !=
	     offsetof(struct trap_per_cpu, tsb_huge)) ||
	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
	     offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
	    (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
	     offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
	    (TRAP_PER_CPU_CPU_MONDO_QMASK !=
	     offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
	    (TRAP_PER_CPU_DEV_MONDO_QMASK !=
	     offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
	    (TRAP_PER_CPU_RESUM_QMASK !=
	     offsetof(struct trap_per_cpu, resum_qmask)) ||
	    (TRAP_PER_CPU_NONRESUM_QMASK !=
	     offsetof(struct trap_per_cpu, nonresum_qmask)))
		trap_per_cpu_offsets_are_bolixed_dave();

	/* And for struct tsb_config. */
	if ((TSB_CONFIG_TSB !=
	     offsetof(struct tsb_config, tsb)) ||
	    (TSB_CONFIG_RSS_LIMIT !=
	     offsetof(struct tsb_config, tsb_rss_limit)) ||
	    (TSB_CONFIG_NENTRIES !=
	     offsetof(struct tsb_config, tsb_nentries)) ||
	    (TSB_CONFIG_REG_VAL !=
	     offsetof(struct tsb_config, tsb_reg_val)) ||
	    (TSB_CONFIG_MAP_VADDR !=
	     offsetof(struct tsb_config, tsb_map_vaddr)) ||
	    (TSB_CONFIG_MAP_PTE !=
	     offsetof(struct tsb_config, tsb_map_pte)))
		tsb_config_offsets_are_bolixed_dave();

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}
This page took 0.145254 seconds and 6 git commands to generate.