/* arch/sparc64/kernel/traps.c
 *
 * Copyright (C) 1995,1997,2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
 */

/*
 * I like traps on v9, :))))
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kdebug.h>

#include <asm/smp.h>
#include <asm/delay.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/lsu.h>
#include <asm/dcu.h>
#include <asm/estate.h>
#include <asm/chafsr.h>
#include <asm/sfafsr.h>
#include <asm/psrcompat.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/head.h>
#include <asm/prom.h>

#include "entry.h"
#include "kstack.h"

/* When an irrecoverable trap occurs at tl > 0, the trap entry
 * code logs the trap state registers at every level in the trap
 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
 * is as follows:
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;
		unsigned long tpc;
		unsigned long tnpc;
		unsigned long tt;
	} trapstack[4];
	unsigned long tl;
};

static void dump_tl1_traplog(struct tl1_traplog *p)
{
	int i, limit;

	printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
	       "dumping trap stack.\n", p->tl);

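	/* The hypervisor only exposes two trap levels to sun4v
	 * guests; pre-sun4v chips log all four.
	 */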
	limit = (tlb_type == hypervisor) ? 2 : 4;
	for (i = 0; i < limit; i++) {
		printk(KERN_EMERG
		       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
		       "TNPC[%016lx] TT[%lx]\n",
		       i + 1,
		       p->trapstack[i].tstate, p->trapstack[i].tpc,
		       p->trapstack[i].tnpc, p->trapstack[i].tt);
		printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
	}
}

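/* Fallback for traps with no registered handler.  Trap levels below
 * 0x100 are hardware traps; higher levels are bad software traps,
 * which user processes take as SIGILL.
 */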
void bad_trap(struct pt_regs *regs, long lvl)
{
	char buffer[32];
	siginfo_t info;

	if (notify_die(DIE_TRAP, "bad trap", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	if (lvl < 0x100) {
		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
		die_if_kernel(buffer, regs);
	}

	lvl -= 0x100;
	if (regs->tstate & TSTATE_PRIV) {
		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
		die_if_kernel(buffer, regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = lvl;
	force_sig_info(SIGILL, &info, current);
}

void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
	char buffer[32];

	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	sprintf(buffer, "Bad trap %lx at tl>0", lvl);
	die_if_kernel(buffer, regs);
}

#ifdef CONFIG_DEBUG_BUGVERBOSE
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
#endif

void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("spitfire_insn_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Iax", regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_insn_access_exception(regs, sfsr, sfar);
}

void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("sun4v_insn_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Iax", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_insn_access_exception(regs, addr, type_ctx);
}

void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		/* Shit... */
		printk("spitfire_data_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_data_access_exception(regs, sfsr, sfar);
}

void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("sun4v_data_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Dax", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_data_access_exception(regs, addr, type_ctx);
}

#ifdef CONFIG_PCI
/* This is really pathetic... */
extern volatile int pci_poke_in_progress;
extern volatile int pci_poke_cpu;
extern volatile int pci_poke_faulted;
#endif

/* After an access error the L1 caches may hold bad data.  Invalidate
 * every I-cache and D-cache tag, then re-enable both caches via the
 * LSU control register.
 */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em. */
	for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable in LSU. */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			       "i" (ASI_LSU_CONTROL)
			     : "memory");
}

static void spitfire_enable_estate_errors(void)
{
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (ESTATE_ERR_ALL),
			       "i" (ASI_ESTATE_ERROR_EN));
}

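/* Decode table for UDB ECC syndrome bytes.  The encoded value is
 * handed to prom_getunumber() so the PROM can name the memory
 * module that produced the error.
 */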
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};

static char *syndrome_unknown = "<Unknown>";

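/* If BIT is set in a UDB error register value, decode its ECC
 * syndrome and ask the PROM which memory module it implicates.
 */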
static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
{
	unsigned short scode;
	char memmod_str[64], *p;

	if (udbl & bit) {
		scode = ecc_syndrome_table[udbl & 0xff];
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}

	if (udbh & bit) {
		scode = ecc_syndrome_table[udbh & 0xff];
		if (prom_getunumber(scode, afar,
				    memmod_str, sizeof(memmod_str)) == -1)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}
}

static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{
	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
		   0, TRAP_TYPE_CEE, SIGTRAP);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}

static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
{
	siginfo_t info;

	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
	       "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);

	/* XXX add more human friendly logging of the error status
	 * XXX as is implemented for cheetah
	 */

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Uncorrectable Error", regs,
		   0, tt, SIGTRAP);

	if (regs->tstate & TSTATE_PRIV) {
		if (tl1)
			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
		die_if_kernel("UE", regs);
	}

	/* XXX need more intelligent processing here, such as is implemented
	 * XXX for cheetah errors, in fact if the E-cache still holds the
	 * XXX line with bad parity this will loop
	 */

	spitfire_clean_and_reenable_l1_caches();
	spitfire_enable_estate_errors();

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_OBJERR;
	info.si_addr = (void *)0;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			if (udbh & UDBE_CE) {
				__asm__ __volatile__(
					"stxa %0, [%1] %2\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			if (udbl & UDBE_CE) {
				__asm__ __volatile__(
					"stxa %0, [%1] %2\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}

int cheetah_pcache_forced_on;

void cheetah_enable_pcache(void)
{
	unsigned long dcr;

	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
	       smp_processor_id());

	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (dcr)
			     : "i" (ASI_DCU_CONTROL_REG));
	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}

/* Cheetah error trap handling. */
static unsigned long ecache_flush_physbase;
static unsigned long ecache_flush_linesize;
static unsigned long ecache_flush_size;

/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */

struct afsr_error_table {
	unsigned long mask;
	const char *name;
};

static const char CHAFSR_PERR_msg[] =
	"System interface protocol error";
static const char CHAFSR_IERR_msg[] =
	"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming address";
static const char CHAFSR_UCU_msg[] =
	"Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
	"SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
	"Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
	"Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
	"Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
	"Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
	"Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
	"HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
	"HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
	"HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
	"HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
	"HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
	"Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
	"Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
	"HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
	"Uncorrectable system bus data ECC error for ivec read";
static struct afsr_error_table __cheetah_error_table[] = {
	{ CHAFSR_PERR, CHAFSR_PERR_msg },
	{ CHAFSR_IERR, CHAFSR_IERR_msg },
	{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
	{ CHAFSR_UCU, CHAFSR_UCU_msg },
	{ CHAFSR_UCC, CHAFSR_UCC_msg },
	{ CHAFSR_UE, CHAFSR_UE_msg },
	{ CHAFSR_EDU, CHAFSR_EDU_msg },
	{ CHAFSR_EMU, CHAFSR_EMU_msg },
	{ CHAFSR_WDU, CHAFSR_WDU_msg },
	{ CHAFSR_CPU, CHAFSR_CPU_msg },
	{ CHAFSR_CE, CHAFSR_CE_msg },
	{ CHAFSR_EDC, CHAFSR_EDC_msg },
	{ CHAFSR_EMC, CHAFSR_EMC_msg },
	{ CHAFSR_WDC, CHAFSR_WDC_msg },
	{ CHAFSR_CPC, CHAFSR_CPC_msg },
	{ CHAFSR_TO, CHAFSR_TO_msg },
	{ CHAFSR_BERR, CHAFSR_BERR_msg },
	/* These two do not update the AFAR. */
	{ CHAFSR_IVC, CHAFSR_IVC_msg },
	{ CHAFSR_IVU, CHAFSR_IVU_msg },
	{ 0, NULL },
};
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{ CHAFSR_PERR, CHAFSR_PERR_msg },
	{ CHAFSR_IERR, CHAFSR_IERR_msg },
	{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
	{ CHAFSR_UCU, CHAFSR_UCU_msg },
	{ CHAFSR_UCC, CHAFSR_UCC_msg },
	{ CHAFSR_UE, CHAFSR_UE_msg },
	{ CHAFSR_EDU, CHAFSR_EDU_msg },
	{ CHAFSR_EMU, CHAFSR_EMU_msg },
	{ CHAFSR_WDU, CHAFSR_WDU_msg },
	{ CHAFSR_CPU, CHAFSR_CPU_msg },
	{ CHAFSR_CE, CHAFSR_CE_msg },
	{ CHAFSR_EDC, CHAFSR_EDC_msg },
	{ CHAFSR_EMC, CHAFSR_EMC_msg },
	{ CHAFSR_WDC, CHAFSR_WDC_msg },
	{ CHAFSR_CPC, CHAFSR_CPC_msg },
	{ CHAFSR_TO, CHAFSR_TO_msg },
	{ CHAFSR_BERR, CHAFSR_BERR_msg },
	{ CHPAFSR_DTO, CHPAFSR_DTO_msg },
	{ CHPAFSR_DBERR, CHPAFSR_DBERR_msg },
	{ CHPAFSR_THCE, CHPAFSR_THCE_msg },
	{ CHPAFSR_TSCE, CHPAFSR_TSCE_msg },
	{ CHPAFSR_TUE, CHPAFSR_TUE_msg },
	{ CHPAFSR_DUE, CHPAFSR_DUE_msg },
	/* These two do not update the AFAR. */
	{ CHAFSR_IVC, CHAFSR_IVC_msg },
	{ CHAFSR_IVU, CHAFSR_IVU_msg },
	{ 0, NULL },
};
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
static struct afsr_error_table __jalapeno_error_table[] = {
	{ JPAFSR_JETO, JPAFSR_JETO_msg },
	{ JPAFSR_SCE, JPAFSR_SCE_msg },
	{ JPAFSR_JEIC, JPAFSR_JEIC_msg },
	{ JPAFSR_JEIT, JPAFSR_JEIT_msg },
	{ CHAFSR_PERR, CHAFSR_PERR_msg },
	{ CHAFSR_IERR, CHAFSR_IERR_msg },
	{ CHAFSR_ISAP, CHAFSR_ISAP_msg },
	{ CHAFSR_UCU, CHAFSR_UCU_msg },
	{ CHAFSR_UCC, CHAFSR_UCC_msg },
	{ CHAFSR_UE, CHAFSR_UE_msg },
	{ CHAFSR_EDU, CHAFSR_EDU_msg },
	{ JPAFSR_OM, JPAFSR_OM_msg },
	{ CHAFSR_WDU, CHAFSR_WDU_msg },
	{ CHAFSR_CPU, CHAFSR_CPU_msg },
	{ CHAFSR_CE, CHAFSR_CE_msg },
	{ CHAFSR_EDC, CHAFSR_EDC_msg },
	{ JPAFSR_ETP, JPAFSR_ETP_msg },
	{ CHAFSR_WDC, CHAFSR_WDC_msg },
	{ CHAFSR_CPC, CHAFSR_CPC_msg },
	{ CHAFSR_TO, CHAFSR_TO_msg },
	{ CHAFSR_BERR, CHAFSR_BERR_msg },
	{ JPAFSR_UMS, JPAFSR_UMS_msg },
	{ JPAFSR_RUE, JPAFSR_RUE_msg },
	{ JPAFSR_RCE, JPAFSR_RCE_msg },
	{ JPAFSR_BP, JPAFSR_BP_msg },
	{ JPAFSR_WBP, JPAFSR_WBP_msg },
	{ JPAFSR_FRC, JPAFSR_FRC_msg },
	{ JPAFSR_FRU, JPAFSR_FRU_msg },
	/* These two do not update the AFAR. */
	{ CHAFSR_IVU, CHAFSR_IVU_msg },
	{ 0, NULL },
};
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;

struct cheetah_err_info *cheetah_error_log;

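/* Two log entries per cpu: the first records errors taken at TL0,
 * the second records errors taken at trap level > 0 (flagged by
 * CHAFSR_TL1 in the AFSR value).
 */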
static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
{
	struct cheetah_err_info *p;
	int cpu = smp_processor_id();

	if (!cheetah_error_log)
		return NULL;

	p = cheetah_error_log + (cpu * 2);
	if ((afsr & CHAFSR_TL1) != 0UL)
		p++;

	return p;
}

extern unsigned int tl0_icpe[], tl1_icpe[];
extern unsigned int tl0_dcpe[], tl1_dcpe[];
extern unsigned int tl0_fecc[], tl1_fecc[];
extern unsigned int tl0_cee[], tl1_cee[];
extern unsigned int tl0_iae[], tl1_iae[];
extern unsigned int tl0_dae[], tl1_dae[];
extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];

void __init cheetah_ecache_flush_init(void)
{
	unsigned long largest_size, smallest_linesize, order, ver;
	int i, sz;

	/* Scan all cpu device tree nodes, note two values:
	 * 1) largest E-cache size
	 * 2) smallest E-cache line size
	 */
	largest_size = 0UL;
	smallest_linesize = ~0UL;

	for (i = 0; i < NR_CPUS; i++) {
		unsigned long val;

		val = cpu_data(i).ecache_size;
		if (!val)
			continue;

		if (val > largest_size)
			largest_size = val;

		val = cpu_data(i).ecache_line_size;
		if (val < smallest_linesize)
			smallest_linesize = val;
	}

	if (largest_size == 0UL || smallest_linesize == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
			    "parameters.\n");
		prom_halt();
	}

	ecache_flush_size = (2 * largest_size);
	ecache_flush_linesize = smallest_linesize;

	ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);

	if (ecache_flush_physbase == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
			    "contiguous physical memory.\n",
			    ecache_flush_size);
		prom_halt();
	}

	/* Now allocate error trap reporting scoreboard. */
	sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
	for (order = 0; order < MAX_ORDER; order++) {
		if ((PAGE_SIZE << order) >= sz)
			break;
	}
	cheetah_error_log = (struct cheetah_err_info *)
		__get_free_pages(GFP_KERNEL, order);
	if (!cheetah_error_log) {
		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
			    "error logging scoreboard (%d bytes).\n", sz);
		prom_halt();
	}
	memset(cheetah_error_log, 0, PAGE_SIZE << order);

	/* Mark all AFSRs as invalid so that the trap handler will
	 * log new information there.
	 */
	for (i = 0; i < 2 * NR_CPUS; i++)
		cheetah_error_log[i].afsr = CHAFSR_INVALID;

	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	if ((ver >> 32) == __JALAPENO_ID ||
	    (ver >> 32) == __SERRANO_ID) {
		cheetah_error_table = &__jalapeno_error_table[0];
		cheetah_afsr_errors = JPAFSR_ERRORS;
	} else if ((ver >> 32) == 0x003e0015) {
		cheetah_error_table = &__cheetah_plus_error_table[0];
		cheetah_afsr_errors = CHPAFSR_ERRORS;
	} else {
		cheetah_error_table = &__cheetah_error_table[0];
		cheetah_afsr_errors = CHAFSR_ERRORS;
	}

	/* Now patch trap tables. */
	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	if (tlb_type == cheetah_plus) {
		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
	}
	flushi(PAGE_OFFSET);
}

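/* Displacement-flush the entire E-cache by streaming loads through
 * the flush area, which was sized at twice the largest E-cache so
 * that every line gets evicted.
 */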
static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	__asm__ __volatile__("1: subcc %0, %4, %0\n\t"
			     "   bne,pt %%xcc, 1b\n\t"
			     "    ldxa [%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}

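/* Displacement-flush a single E-cache line: load from the two
 * flush-area addresses that map to PHYSADDR's cache index so the
 * victim line is pushed out to memory.
 */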
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}

/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned int icache_size, icache_line_size;
	unsigned long addr;

	icache_size = local_cpu_data().icache_size;
	icache_line_size = local_cpu_data().icache_line_size;

	/* Clear the valid bits in all the tags. */
	for (addr = 0; addr < icache_size; addr += icache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr | (2 << 3)),
				       "i" (ASI_IC_TAG));
	}
}

static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}

static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}

/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		unsigned long tag = (addr >> 14);
		unsigned long line;

		__asm__ __volatile__("membar #Sync\n\t"
				     "stxa %0, [%1] %2\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (addr),
				       "i" (ASI_DCACHE_UTAG));
		for (line = addr; line < addr + dcache_line_size; line += 8)
			__asm__ __volatile__("membar #Sync\n\t"
					     "stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (line),
					       "i" (ASI_DCACHE_DATA));
	}
}

/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
#define MT0	137
#define MT1	138
#define MT2	139
#define NONE	254
#define MTC0	140
#define MTC1	141
#define MTC2	142
#define MTC3	143
#define C0	128
#define C1	129
#define C2	130
#define C3	131
#define C4	132
#define C5	133
#define C6	134
#define C7	135
#define C8	136
#define M2	144
#define M3	145
#define M4	146
#define M	147
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
static unsigned char cheetah_mtag_syntab[] = {
	NONE, MTC0,
	MTC1, NONE,
	MTC2, NONE,
	NONE, MT0,
	MTC3, NONE,
	NONE, MT1,
	NONE, MT2,
	NONE, NONE
};

/* Return the highest priority error condition mentioned. */
static inline unsigned long cheetah_get_hipri(unsigned long afsr)
{
	unsigned long tmp = 0;
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
			return tmp;
	}
	return tmp;
}

static const char *cheetah_get_string(unsigned long bit)
{
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((bit & cheetah_error_table[i].mask) != 0UL)
			return cheetah_error_table[i].name;
	}
	return "???";
}

extern int chmc_getunumber(int, unsigned long, char *, int);

static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
	printk("%s" "ERROR(%d): ",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
	printk("TPC<%pS>\n", (void *) regs->tpc);
	printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU | \
			 CHAFSR_UCC | CHAFSR_UCU | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
	       "u[%016lx] l[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}

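/* Re-read the AFSR/AFAR, copy any freshly latched error bits into
 * *LOGP, then write the bits back to the AFSR to clear them.
 * Returns non-zero if new errors were present.
 */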
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}

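/* Fast-ECC error trap handler (UCU/UCC): the E-cache may hold bad
 * data, so displacement-flush it, snapshot the per-cpu error log,
 * re-enable the L1 caches and error reporting, then decide whether
 * the system can continue.
 */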
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}

/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa [%%g0] %2, %0\n\t"
			     "andn %0, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %2\n\t"
			     "membar #Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "casxa [%2] %3, %%g0, %%g0\n\t"
			     "membar #StoreLoad | #StoreStore\n\t"
			     "ldxa [%0] %3, %%g0\n\t"
			     "ldxa [%1] %3, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}

/* Return non-zero if PADDR is a valid physical memory address. */
static int cheetah_check_main_memory(unsigned long paddr)
{
	unsigned long vaddr = PAGE_OFFSET + paddr;

	if (vaddr > (unsigned long) high_memory)
		return 0;

	return kern_addr_valid(vaddr);
}

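/* Correctable-ECC error trap handler: attempt a line fix for main
 * memory CEs, flush the affected caches, re-enable CE reporting,
 * and log the event.
 */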
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}

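/* Deferred error trap handler (UEs, bus errors, timeouts): these can
 * be reported long after the offending access, so recovery also
 * requires the AFAR to point at main memory and, for kernel-mode
 * traps, an exception table fixup.
 */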
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		(void) cheetah_recheck_errors(NULL);

		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			const struct exception_table_entry *entry;

			entry = search_exception_tables(regs->tpc);
			if (entry) {
				/* OK, kernel access to userspace. */
				recoverable = 1;
			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = entry->fixup;
					regs->tnpc = regs->tpc + 4;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}

/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 * Bit0:	0=dcache,1=icache
 * Bit1:	0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
	printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
}

struct sun4v_error_entry {
	u64	err_handle;
	u64	err_stick;

	u32	err_type;
#define SUN4V_ERR_TYPE_UNDEFINED	0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
#define SUN4V_ERR_TYPE_WARNING_RES	4

	u32	err_attrs;
#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
#define SUN4V_ERR_ATTRS_PIO		0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
#define SUN4V_ERR_ATTRS_USER_MODE	0x01000000
#define SUN4V_ERR_ATTRS_PRIV_MODE	0x02000000
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000

	u64	err_raddr;
	u32	err_size;
	u16	err_cpu;
	u16	err_pad;
};

static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);

static const char *sun4v_err_type_to_str(u32 type)
{
	switch (type) {
	case SUN4V_ERR_TYPE_UNDEFINED:
		return "undefined";
	case SUN4V_ERR_TYPE_UNCORRECTED_RES:
		return "uncorrected resumable";
	case SUN4V_ERR_TYPE_PRECISE_NONRES:
		return "precise nonresumable";
	case SUN4V_ERR_TYPE_DEFERRED_NONRES:
		return "deferred nonresumable";
	case SUN4V_ERR_TYPE_WARNING_RES:
		return "warning resumable";
	default:
		return "unknown";
	}
}

1748 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
1749 {
1750 int cnt;
1751
1752 printk("%s: Reporting on cpu %d\n", pfx, cpu);
1753 printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
1754 pfx,
1755 ent->err_handle, ent->err_stick,
1756 ent->err_type,
1757 sun4v_err_type_to_str(ent->err_type));
1758 printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
1759 pfx,
1760 ent->err_attrs,
1761 ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
1762 "processor" : ""),
1763 ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
1764 "memory" : ""),
1765 ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
1766 "pio" : ""),
1767 ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
1768 "integer-regs" : ""),
1769 ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
1770 "fpu-regs" : ""),
1771 ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
1772 "user" : ""),
1773 ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
1774 "privileged" : ""),
1775 ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
1776 "queue-full" : ""));
1777 printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
1778 pfx,
1779 ent->err_raddr, ent->err_size, ent->err_cpu);
1780
1781 show_regs(regs);
1782
1783 if ((cnt = atomic_read(ocnt)) != 0) {
1784 atomic_set(ocnt, 0);
1785 wmb();
1786 printk("%s: Queue overflowed %d times.\n",
1787 pfx, cnt);
1788 }
1789 }
1790
1791 /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1792 * Log the event and clear the first word of the entry.
1793 */
1794 void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
1795 {
1796 struct sun4v_error_entry *ent, local_copy;
1797 struct trap_per_cpu *tb;
1798 unsigned long paddr;
1799 int cpu;
1800
1801 cpu = get_cpu();
1802
1803 tb = &trap_block[cpu];
1804 paddr = tb->resum_kernel_buf_pa + offset;
1805 ent = __va(paddr);
1806
1807 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1808
1809 /* We have a local copy now, so release the entry. */
1810 ent->err_handle = 0;
1811 wmb();
1812
1813 put_cpu();
1814
1815 if (local_copy.err_type == SUN4V_ERR_TYPE_WARNING_RES) {
1816 /* If err_type is 0x4, it's a powerdown request. Do not do
1817 * the usual resumable error log because that makes it look
1818 * like some abnormal error. (Test the local copy; the queue
1819 * entry itself was released above and may be reused.) */
1820 printk(KERN_INFO "Power down request...\n");
1821 kill_cad_pid(SIGINT, 1);
1822 return;
1823 }
1824
1825 sun4v_log_error(regs, &local_copy, cpu,
1826 KERN_ERR "RESUMABLE ERROR",
1827 &sun4v_resum_oflow_cnt);
1828 }
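/* Editor's sketch (hypothetical refactor, not in the original file):
 * the resumable and non-resumable handlers share the same
 * consume-then-release idiom, which could be factored out:
 */
#if 0
static void sun4v_copy_and_release(unsigned long paddr,
				   struct sun4v_error_entry *copy)
{
	struct sun4v_error_entry *ent = __va(paddr);

	memcpy(copy, ent, sizeof(*copy));
	ent->err_handle = 0;	/* mark the queue slot free */
	wmb();			/* order the release after the copy */
}
#endif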
1829
1830 /* If we try to printk() we'll probably make matters worse, by trying
1831 * to retake locks this cpu already holds or causing more errors. So
1832 * just bump a counter, and we'll report these counter bumps above.
1833 */
1834 void sun4v_resum_overflow(struct pt_regs *regs)
1835 {
1836 atomic_inc(&sun4v_resum_oflow_cnt);
1837 }
1838
1839 /* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
1840 * Log the event, clear the first word of the entry, and die.
1841 */
1842 void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
1843 {
1844 struct sun4v_error_entry *ent, local_copy;
1845 struct trap_per_cpu *tb;
1846 unsigned long paddr;
1847 int cpu;
1848
1849 cpu = get_cpu();
1850
1851 tb = &trap_block[cpu];
1852 paddr = tb->nonresum_kernel_buf_pa + offset;
1853 ent = __va(paddr);
1854
1855 memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
1856
1857 /* We have a local copy now, so release the entry. */
1858 ent->err_handle = 0;
1859 wmb();
1860
1861 put_cpu();
1862
1863 #ifdef CONFIG_PCI
1864 /* Check for the special PCI poke sequence. */
1865 if (pci_poke_in_progress && pci_poke_cpu == cpu) {
1866 pci_poke_faulted = 1;
1867 regs->tpc += 4;
1868 regs->tnpc = regs->tpc + 4;
1869 return;
1870 }
1871 #endif
1872
1873 sun4v_log_error(regs, &local_copy, cpu,
1874 KERN_EMERG "NON-RESUMABLE ERROR",
1875 &sun4v_nonresum_oflow_cnt);
1876
1877 panic("Non-resumable error.");
1878 }
1879
1880 /* If we try to printk() we'll probably make matters worse, by trying
1881 * to retake locks this cpu already holds or causing more errors. So
1882 * just bump a counter, and we'll report these counter bumps above.
1883 */
1884 void sun4v_nonresum_overflow(struct pt_regs *regs)
1885 {
1886 /* XXX Actually even this may not make much sense. Perhaps
1887 * XXX we should just pull the plug and panic directly from here?
1888 */
1889 atomic_inc(&sun4v_nonresum_oflow_cnt);
1890 }
1891
1892 unsigned long sun4v_err_itlb_vaddr;
1893 unsigned long sun4v_err_itlb_ctx;
1894 unsigned long sun4v_err_itlb_pte;
1895 unsigned long sun4v_err_itlb_error;
1896
1897 void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
1898 {
1899 if (tl > 1)
1900 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1901
1902 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
1903 regs->tpc, tl);
1904 printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
1905 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
1906 printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
1907 (void *) regs->u_regs[UREG_I7]);
1908 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
1909 "pte[%lx] error[%lx]\n",
1910 sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
1911 sun4v_err_itlb_pte, sun4v_err_itlb_error);
1912
1913 prom_halt();
1914 }
1915
1916 unsigned long sun4v_err_dtlb_vaddr;
1917 unsigned long sun4v_err_dtlb_ctx;
1918 unsigned long sun4v_err_dtlb_pte;
1919 unsigned long sun4v_err_dtlb_error;
1920
1921 void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
1922 {
1923 if (tl > 1)
1924 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
1925
1926 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
1927 regs->tpc, tl);
1928 printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
1929 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
1930 printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
1931 (void *) regs->u_regs[UREG_I7]);
1932 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
1933 "pte[%lx] error[%lx]\n",
1934 sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
1935 sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
1936
1937 prom_halt();
1938 }
1939
1940 void hypervisor_tlbop_error(unsigned long err, unsigned long op)
1941 {
1942 printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
1943 err, op);
1944 }
1945
1946 void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
1947 {
1948 printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
1949 err, op);
1950 }
1951
1952 void do_fpe_common(struct pt_regs *regs)
1953 {
1954 if (regs->tstate & TSTATE_PRIV) {
1955 regs->tpc = regs->tnpc;
1956 regs->tnpc += 4;
1957 } else {
1958 unsigned long fsr = current_thread_info()->xfsr[0];
1959 siginfo_t info;
1960
1961 if (test_thread_flag(TIF_32BIT)) {
1962 regs->tpc &= 0xffffffff;
1963 regs->tnpc &= 0xffffffff;
1964 }
1965 info.si_signo = SIGFPE;
1966 info.si_errno = 0;
1967 info.si_addr = (void __user *)regs->tpc;
1968 info.si_trapno = 0;
1969 info.si_code = __SI_FAULT;
1970 if ((fsr & 0x1c000) == (1 << 14)) {
1971 if (fsr & 0x10)
1972 info.si_code = FPE_FLTINV;
1973 else if (fsr & 0x08)
1974 info.si_code = FPE_FLTOVF;
1975 else if (fsr & 0x04)
1976 info.si_code = FPE_FLTUND;
1977 else if (fsr & 0x02)
1978 info.si_code = FPE_FLTDIV;
1979 else if (fsr & 0x01)
1980 info.si_code = FPE_FLTRES;
1981 }
1982 force_sig_info(SIGFPE, &info, current);
1983 }
1984 }
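/* Editor's note (illustrative): the tests above decode %fsr per the
 * SPARC V9 spec -- ftt (bits 16:14) == 1 means an IEEE 754 exception,
 * and the cexc field (bits 4:0) identifies which one:
 *
 *	0x10  nvc  invalid        -> FPE_FLTINV
 *	0x08  ofc  overflow       -> FPE_FLTOVF
 *	0x04  ufc  underflow      -> FPE_FLTUND
 *	0x02  dzc  divide-by-zero -> FPE_FLTDIV
 *	0x01  nxc  inexact        -> FPE_FLTRES
 */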
1985
1986 void do_fpieee(struct pt_regs *regs)
1987 {
1988 if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
1989 0, 0x24, SIGFPE) == NOTIFY_STOP)
1990 return;
1991
1992 do_fpe_common(regs);
1993 }
1994
1995 extern int do_mathemu(struct pt_regs *, struct fpustate *);
1996
1997 void do_fpother(struct pt_regs *regs)
1998 {
1999 struct fpustate *f = FPUSTATE;
2000 int ret = 0;
2001
2002 if (notify_die(DIE_TRAP, "fpu exception other", regs,
2003 0, 0x25, SIGFPE) == NOTIFY_STOP)
2004 return;
2005
2006 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
2007 case (2 << 14): /* unfinished_FPop */
2008 case (3 << 14): /* unimplemented_FPop */
2009 ret = do_mathemu(regs, f);
2010 break;
2011 }
2012 if (ret)
2013 return;
2014 do_fpe_common(regs);
2015 }
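/* Editor's note: ftt values 2 (unfinished_FPop) and 3
 * (unimplemented_FPop) mean the hardware punted on the operation;
 * do_mathemu() finishes it in software and returns nonzero on
 * success, in which case no signal is delivered.
 */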
2016
2017 void do_tof(struct pt_regs *regs)
2018 {
2019 siginfo_t info;
2020
2021 if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
2022 0, 0x26, SIGEMT) == NOTIFY_STOP)
2023 return;
2024
2025 if (regs->tstate & TSTATE_PRIV)
2026 die_if_kernel("Penguin overflow trap from kernel mode", regs);
2027 if (test_thread_flag(TIF_32BIT)) {
2028 regs->tpc &= 0xffffffff;
2029 regs->tnpc &= 0xffffffff;
2030 }
2031 info.si_signo = SIGEMT;
2032 info.si_errno = 0;
2033 info.si_code = EMT_TAGOVF;
2034 info.si_addr = (void __user *)regs->tpc;
2035 info.si_trapno = 0;
2036 force_sig_info(SIGEMT, &info, current);
2037 }
2038
2039 void do_div0(struct pt_regs *regs)
2040 {
2041 siginfo_t info;
2042
2043 if (notify_die(DIE_TRAP, "integer division by zero", regs,
2044 0, 0x28, SIGFPE) == NOTIFY_STOP)
2045 return;
2046
2047 if (regs->tstate & TSTATE_PRIV)
2048 die_if_kernel("TL0: Kernel divide by zero.", regs);
2049 if (test_thread_flag(TIF_32BIT)) {
2050 regs->tpc &= 0xffffffff;
2051 regs->tnpc &= 0xffffffff;
2052 }
2053 info.si_signo = SIGFPE;
2054 info.si_errno = 0;
2055 info.si_code = FPE_INTDIV;
2056 info.si_addr = (void __user *)regs->tpc;
2057 info.si_trapno = 0;
2058 force_sig_info(SIGFPE, &info, current);
2059 }
2060
2061 static void instruction_dump(unsigned int *pc)
2062 {
2063 int i;
2064
2065 if ((((unsigned long) pc) & 3))
2066 return;
2067
2068 printk("Instruction DUMP:");
2069 for (i = -3; i < 6; i++)
2070 printk("%c%08x%c", i ? ' ' : '<', pc[i], i ? ' ' : '>');
2071 printk("\n");
2072 }
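/* Editor's note: the dump window is pc[-3]..pc[5] with the faulting
 * instruction bracketed, so a line looks roughly like (values
 * illustrative):
 *
 *	Instruction DUMP:  9de3bf40  82102000 <91d02010>  01000000 ...
 */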
2073
2074 static void user_instruction_dump(unsigned int __user *pc)
2075 {
2076 int i;
2077 unsigned int buf[9];
2078
2079 if ((((unsigned long) pc) & 3))
2080 return;
2081
2082 if (copy_from_user(buf, pc - 3, sizeof(buf)))
2083 return;
2084
2085 printk("Instruction DUMP:");
2086 for (i = 0; i < 9; i++)
2087 printk("%c%08x%c", i == 3 ? '<' : ' ', buf[i], i == 3 ? '>' : ' ');
2088 printk("\n");
2089 }
2090
2091 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2092 {
2093 unsigned long fp, thread_base, ksp;
2094 struct thread_info *tp;
2095 int count = 0;
2096
2097 ksp = (unsigned long) _ksp;
2098 if (!tsk)
2099 tsk = current;
2100 tp = task_thread_info(tsk);
2101 if (ksp == 0UL) {
2102 if (tsk == current)
2103 asm("mov %%fp, %0" : "=r" (ksp));
2104 else
2105 ksp = tp->ksp;
2106 }
2107 if (tp == current_thread_info())
2108 flushw_all();
2109
2110 fp = ksp + STACK_BIAS;
2111 thread_base = (unsigned long) tp;
2112
2113 printk("Call Trace:\n");
2114 do {
2115 struct sparc_stackf *sf;
2116 struct pt_regs *regs;
2117 unsigned long pc;
2118
2119 if (!kstack_valid(tp, fp))
2120 break;
2121 sf = (struct sparc_stackf *) fp;
2122 regs = (struct pt_regs *) (sf + 1);
2123
2124 if (kstack_is_trap_frame(tp, regs)) {
2125 if (!(regs->tstate & TSTATE_PRIV))
2126 break;
2127 pc = regs->tpc;
2128 fp = regs->u_regs[UREG_I6] + STACK_BIAS;
2129 } else {
2130 pc = sf->callers_pc;
2131 fp = (unsigned long)sf->fp + STACK_BIAS;
2132 }
2133
2134 printk(" [%016lx] %pS\n", pc, (void *) pc);
2135 } while (++count < 16);
2136 }
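/* Editor's note: sparc64 stack pointers are biased -- the saved %sp
 * and %fp values are STACK_BIAS (2047) below the real address, so
 * every dereference above first adds the bias back. The walk follows
 * saved frame pointers, peeling embedded pt_regs trap frames, and
 * stops after 16 frames or at the first frame kstack_valid() rejects.
 */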
2137
2138 void dump_stack(void)
2139 {
2140 show_stack(current, NULL);
2141 }
2142
2143 EXPORT_SYMBOL(dump_stack);
2144
2145 static inline int is_kernel_stack(struct task_struct *task,
2146 struct reg_window *rw)
2147 {
2148 unsigned long rw_addr = (unsigned long) rw;
2149 unsigned long thread_base, thread_end;
2150
2151 if (rw_addr < PAGE_OFFSET) {
2152 if (task != &init_task)
2153 return 0;
2154 }
2155
2156 thread_base = (unsigned long) task_stack_page(task);
2157 thread_end = thread_base + sizeof(union thread_union);
2158 if (rw_addr >= thread_base &&
2159 rw_addr < thread_end &&
2160 !(rw_addr & 0x7UL))
2161 return 1;
2162
2163 return 0;
2164 }
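/* Editor's note: addresses below PAGE_OFFSET are normally userland
 * and end the backtrace; init_task is the one exception, since its
 * stack lives in the kernel image at a low virtual address.
 */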
2165
2166 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2167 {
2168 unsigned long fp = rw->ins[6];
2169
2170 if (!fp)
2171 return NULL;
2172
2173 return (struct reg_window *) (fp + STACK_BIAS);
2174 }
2175
2176 void die_if_kernel(char *str, struct pt_regs *regs)
2177 {
2178 static int die_counter;
2179 int count = 0;
2180
2181 /* Amuse the user. */
2182 printk(
2183 " \\|/ ____ \\|/\n"
2184 " \"@'/ .. \\`@\"\n"
2185 " /_| \\__/ |_\\\n"
2186 " \\__U_/\n");
2187
2188 printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
2189 notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
2190 __asm__ __volatile__("flushw");
2191 show_regs(regs);
2192 add_taint(TAINT_DIE);
2193 if (regs->tstate & TSTATE_PRIV) {
2194 struct reg_window *rw = (struct reg_window *)
2195 (regs->u_regs[UREG_FP] + STACK_BIAS);
2196
2197 /* Stop the back trace when we hit userland or we
2198 * find some badly aligned kernel stack.
2199 */
2200 while (rw &&
2201 count++ < 30 &&
2202 is_kernel_stack(current, rw)) {
2203 printk("Caller[%016lx]: %pS\n", rw->ins[7],
2204 (void *) rw->ins[7]);
2205
2206 rw = kernel_stack_up(rw);
2207 }
2208 instruction_dump((unsigned int *) regs->tpc);
2209 } else {
2210 if (test_thread_flag(TIF_32BIT)) {
2211 regs->tpc &= 0xffffffff;
2212 regs->tnpc &= 0xffffffff;
2213 }
2214 user_instruction_dump((unsigned int __user *) regs->tpc);
2215 }
2216 if (regs->tstate & TSTATE_PRIV)
2217 do_exit(SIGKILL);
2218 do_exit(SIGSEGV);
2219 }
2220
2221 #define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19))
2222 #define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
2223
2224 extern int handle_popc(u32 insn, struct pt_regs *regs);
2225 extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
2226 extern int vis_emul(struct pt_regs *, unsigned int);
2227
2228 void do_illegal_instruction(struct pt_regs *regs)
2229 {
2230 unsigned long pc = regs->tpc;
2231 unsigned long tstate = regs->tstate;
2232 u32 insn;
2233 siginfo_t info;
2234
2235 if (notify_die(DIE_TRAP, "illegal instruction", regs,
2236 0, 0x10, SIGILL) == NOTIFY_STOP)
2237 return;
2238
2239 if (tstate & TSTATE_PRIV)
2240 die_if_kernel("Kernel illegal instruction", regs);
2241 if (test_thread_flag(TIF_32BIT))
2242 pc = (u32)pc;
2243 if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
2244 if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
2245 if (handle_popc(insn, regs))
2246 return;
2247 } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
2248 if (handle_ldf_stq(insn, regs))
2249 return;
2250 } else if (tlb_type == hypervisor) {
2251 if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
2252 if (!vis_emul(regs, insn))
2253 return;
2254 } else {
2255 struct fpustate *f = FPUSTATE;
2256
2257 /* XXX maybe verify XFSR bits like
2258 * XXX do_fpother() does?
2259 */
2260 if (do_mathemu(regs, f))
2261 return;
2262 }
2263 }
2264 }
2265 info.si_signo = SIGILL;
2266 info.si_errno = 0;
2267 info.si_code = ILL_ILLOPC;
2268 info.si_addr = (void __user *)pc;
2269 info.si_trapno = 0;
2270 force_sig_info(SIGILL, &info, current);
2271 }
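/* Editor's note: the mask/value pairs above match fixed opcode
 * fields -- VIS_OPCODE_MASK/VAL select op == 2, op3 == 0x36
 * (IMPDEP1), the opcode space the VIS instructions occupy; the POPC
 * and LDQ/STQ tests are hand-built mask/value pairs in the same way.
 */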
2272
2273 extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
2274
2275 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
2276 {
2277 siginfo_t info;
2278
2279 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2280 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2281 return;
2282
2283 if (regs->tstate & TSTATE_PRIV) {
2284 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2285 return;
2286 }
2287 info.si_signo = SIGBUS;
2288 info.si_errno = 0;
2289 info.si_code = BUS_ADRALN;
2290 info.si_addr = (void __user *)sfar;
2291 info.si_trapno = 0;
2292 force_sig_info(SIGBUS, &info, current);
2293 }
2294
2295 void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
2296 {
2297 siginfo_t info;
2298
2299 if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2300 0, 0x34, SIGSEGV) == NOTIFY_STOP)
2301 return;
2302
2303 if (regs->tstate & TSTATE_PRIV) {
2304 kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2305 return;
2306 }
2307 info.si_signo = SIGBUS;
2308 info.si_errno = 0;
2309 info.si_code = BUS_ADRALN;
2310 info.si_addr = (void __user *) addr;
2311 info.si_trapno = 0;
2312 force_sig_info(SIGBUS, &info, current);
2313 }
2314
2315 void do_privop(struct pt_regs *regs)
2316 {
2317 siginfo_t info;
2318
2319 if (notify_die(DIE_TRAP, "privileged operation", regs,
2320 0, 0x11, SIGILL) == NOTIFY_STOP)
2321 return;
2322
2323 if (test_thread_flag(TIF_32BIT)) {
2324 regs->tpc &= 0xffffffff;
2325 regs->tnpc &= 0xffffffff;
2326 }
2327 info.si_signo = SIGILL;
2328 info.si_errno = 0;
2329 info.si_code = ILL_PRVOPC;
2330 info.si_addr = (void __user *)regs->tpc;
2331 info.si_trapno = 0;
2332 force_sig_info(SIGILL, &info, current);
2333 }
2334
2335 void do_privact(struct pt_regs *regs)
2336 {
2337 do_privop(regs);
2338 }
2339
2340 /* Trap level 1 stuff or other traps we should never see... */
2341 void do_cee(struct pt_regs *regs)
2342 {
2343 die_if_kernel("TL0: Cache Error Exception", regs);
2344 }
2345
2346 void do_cee_tl1(struct pt_regs *regs)
2347 {
2348 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2349 die_if_kernel("TL1: Cache Error Exception", regs);
2350 }
2351
2352 void do_dae_tl1(struct pt_regs *regs)
2353 {
2354 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2355 die_if_kernel("TL1: Data Access Exception", regs);
2356 }
2357
2358 void do_iae_tl1(struct pt_regs *regs)
2359 {
2360 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2361 die_if_kernel("TL1: Instruction Access Exception", regs);
2362 }
2363
2364 void do_div0_tl1(struct pt_regs *regs)
2365 {
2366 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2367 die_if_kernel("TL1: DIV0 Exception", regs);
2368 }
2369
2370 void do_fpdis_tl1(struct pt_regs *regs)
2371 {
2372 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2373 die_if_kernel("TL1: FPU Disabled", regs);
2374 }
2375
2376 void do_fpieee_tl1(struct pt_regs *regs)
2377 {
2378 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2379 die_if_kernel("TL1: FPU IEEE Exception", regs);
2380 }
2381
2382 void do_fpother_tl1(struct pt_regs *regs)
2383 {
2384 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2385 die_if_kernel("TL1: FPU Other Exception", regs);
2386 }
2387
2388 void do_ill_tl1(struct pt_regs *regs)
2389 {
2390 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2391 die_if_kernel("TL1: Illegal Instruction Exception", regs);
2392 }
2393
2394 void do_irq_tl1(struct pt_regs *regs)
2395 {
2396 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2397 die_if_kernel("TL1: IRQ Exception", regs);
2398 }
2399
2400 void do_lddfmna_tl1(struct pt_regs *regs)
2401 {
2402 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2403 die_if_kernel("TL1: LDDF Exception", regs);
2404 }
2405
2406 void do_stdfmna_tl1(struct pt_regs *regs)
2407 {
2408 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2409 die_if_kernel("TL1: STDF Exception", regs);
2410 }
2411
2412 void do_paw(struct pt_regs *regs)
2413 {
2414 die_if_kernel("TL0: Phys Watchpoint Exception", regs);
2415 }
2416
2417 void do_paw_tl1(struct pt_regs *regs)
2418 {
2419 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2420 die_if_kernel("TL1: Phys Watchpoint Exception", regs);
2421 }
2422
2423 void do_vaw(struct pt_regs *regs)
2424 {
2425 die_if_kernel("TL0: Virt Watchpoint Exception", regs);
2426 }
2427
2428 void do_vaw_tl1(struct pt_regs *regs)
2429 {
2430 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2431 die_if_kernel("TL1: Virt Watchpoint Exception", regs);
2432 }
2433
2434 void do_tof_tl1(struct pt_regs *regs)
2435 {
2436 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2437 die_if_kernel("TL1: Tag Overflow Exception", regs);
2438 }
2439
2440 void do_getpsr(struct pt_regs *regs)
2441 {
2442 regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2443 regs->tpc = regs->tnpc;
2444 regs->tnpc += 4;
2445 if (test_thread_flag(TIF_32BIT)) {
2446 regs->tpc &= 0xffffffff;
2447 regs->tnpc &= 0xffffffff;
2448 }
2449 }
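/* Editor's note: this emulates a SPARC V8 "rd %psr" for compat
 * userland -- tstate_to_psr() folds the V9 %tstate fields into a V8
 * %psr image, the value comes back in the caller's %o0, and the trap
 * instruction is skipped by advancing %tpc/%tnpc.
 */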
2450
2451 struct trap_per_cpu trap_block[NR_CPUS];
2452
2453 /* This can get invoked before sched_init() so play it super safe
2454 * and use hard_smp_processor_id().
2455 */
2456 void init_cur_cpu_trap(struct thread_info *t)
2457 {
2458 int cpu = hard_smp_processor_id();
2459 struct trap_per_cpu *p = &trap_block[cpu];
2460
2461 p->thread = t;
2462 p->pgd_paddr = 0;
2463 }
2464
2465 extern void thread_info_offsets_are_bolixed_dave(void);
2466 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
2467 extern void tsb_config_offsets_are_bolixed_dave(void);
2468
2469 /* Only invoked on boot processor. */
2470 void __init trap_init(void)
2471 {
2472 /* Compile time sanity check. */
2473 if (TI_TASK != offsetof(struct thread_info, task) ||
2474 TI_FLAGS != offsetof(struct thread_info, flags) ||
2475 TI_CPU != offsetof(struct thread_info, cpu) ||
2476 TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
2477 TI_KSP != offsetof(struct thread_info, ksp) ||
2478 TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
2479 TI_KREGS != offsetof(struct thread_info, kregs) ||
2480 TI_UTRAPS != offsetof(struct thread_info, utraps) ||
2481 TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
2482 TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
2483 TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
2484 TI_GSR != offsetof(struct thread_info, gsr) ||
2485 TI_XFSR != offsetof(struct thread_info, xfsr) ||
2486 TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
2487 TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
2488 TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
2489 TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
2490 TI_PCR != offsetof(struct thread_info, pcr_reg) ||
2491 TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
2492 TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
2493 TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
2494 TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
2495 TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
2496 TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
2497 TI_FPREGS != offsetof(struct thread_info, fpregs) ||
2498 (TI_FPREGS & (64 - 1)))
2499 thread_info_offsets_are_bolixed_dave();
2500
2501 if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
2502 (TRAP_PER_CPU_PGD_PADDR !=
2503 offsetof(struct trap_per_cpu, pgd_paddr)) ||
2504 (TRAP_PER_CPU_CPU_MONDO_PA !=
2505 offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
2506 (TRAP_PER_CPU_DEV_MONDO_PA !=
2507 offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
2508 (TRAP_PER_CPU_RESUM_MONDO_PA !=
2509 offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
2510 (TRAP_PER_CPU_RESUM_KBUF_PA !=
2511 offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
2512 (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
2513 offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
2514 (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
2515 offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
2516 (TRAP_PER_CPU_FAULT_INFO !=
2517 offsetof(struct trap_per_cpu, fault_info)) ||
2518 (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
2519 offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
2520 (TRAP_PER_CPU_CPU_LIST_PA !=
2521 offsetof(struct trap_per_cpu, cpu_list_pa)) ||
2522 (TRAP_PER_CPU_TSB_HUGE !=
2523 offsetof(struct trap_per_cpu, tsb_huge)) ||
2524 (TRAP_PER_CPU_TSB_HUGE_TEMP !=
2525 offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
2526 (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
2527 offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
2528 (TRAP_PER_CPU_CPU_MONDO_QMASK !=
2529 offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
2530 (TRAP_PER_CPU_DEV_MONDO_QMASK !=
2531 offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
2532 (TRAP_PER_CPU_RESUM_QMASK !=
2533 offsetof(struct trap_per_cpu, resum_qmask)) ||
2534 (TRAP_PER_CPU_NONRESUM_QMASK !=
2535 offsetof(struct trap_per_cpu, nonresum_qmask)))
2536 trap_per_cpu_offsets_are_bolixed_dave();
2537
2538 if ((TSB_CONFIG_TSB !=
2539 offsetof(struct tsb_config, tsb)) ||
2540 (TSB_CONFIG_RSS_LIMIT !=
2541 offsetof(struct tsb_config, tsb_rss_limit)) ||
2542 (TSB_CONFIG_NENTRIES !=
2543 offsetof(struct tsb_config, tsb_nentries)) ||
2544 (TSB_CONFIG_REG_VAL !=
2545 offsetof(struct tsb_config, tsb_reg_val)) ||
2546 (TSB_CONFIG_MAP_VADDR !=
2547 offsetof(struct tsb_config, tsb_map_vaddr)) ||
2548 (TSB_CONFIG_MAP_PTE !=
2549 offsetof(struct tsb_config, tsb_map_pte)))
2550 tsb_config_offsets_are_bolixed_dave();
2551
2552 /* Attach to the address space of init_task. On SMP we
2553 * do this in smp.c:smp_callin for other cpus.
2554 */
2555 atomic_inc(&init_mm.mm_count);
2556 current->active_mm = &init_mm;
2557 }
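/* Editor's note: the checks above are link-time assertions -- the
 * *_are_bolixed_dave() functions are declared but never defined, so
 * if any offset comparison survives constant folding the kernel
 * fails to link. With BUILD_BUG_ON() from <linux/kernel.h> one such
 * test could read (sketch):
 */
#if 0
	BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task));
#endif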