pid namespaces: define is_global_init() and is_container_init()
arch/ppc/kernel/traps.c
/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@cs.anu.edu.au)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/reg.h>
#include <asm/xmon.h>
#include <asm/pmc.h>

#ifdef CONFIG_XMON
extern int xmon_bpt(struct pt_regs *regs);
extern int xmon_sstep(struct pt_regs *regs);
extern int xmon_iabr_match(struct pt_regs *regs);
extern int xmon_dabr_match(struct pt_regs *regs);

int (*debugger)(struct pt_regs *regs) = xmon;
int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt;
int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep;
int (*debugger_iabr_match)(struct pt_regs *regs) = xmon_iabr_match;
int (*debugger_dabr_match)(struct pt_regs *regs) = xmon_dabr_match;
void (*debugger_fault_handler)(struct pt_regs *regs);
#else
#ifdef CONFIG_KGDB
int (*debugger)(struct pt_regs *regs);
int (*debugger_bpt)(struct pt_regs *regs);
int (*debugger_sstep)(struct pt_regs *regs);
int (*debugger_iabr_match)(struct pt_regs *regs);
int (*debugger_dabr_match)(struct pt_regs *regs);
void (*debugger_fault_handler)(struct pt_regs *regs);
#else
#define debugger(regs)			do { } while (0)
#define debugger_bpt(regs)		0
#define debugger_sstep(regs)		0
#define debugger_iabr_match(regs)	0
#define debugger_dabr_match(regs)	0
#define debugger_fault_handler		((void (*)(struct pt_regs *))0)
#endif
#endif

/*
 * Trap & Exception support
 */

DEFINE_SPINLOCK(die_lock);

int die(const char *str, struct pt_regs *fp, long err)
{
	static int die_counter;
	int nl = 0;
	console_verbose();
	spin_lock_irq(&die_lock);
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
	nl = 1;
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
	nl = 1;
#endif
	if (nl)
		printk("\n");
	show_regs(fp);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	/* do_exit() should take care of panic'ing from an interrupt
	 * context so we don't handle it here
	 */
	do_exit(err);
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;

	if (!user_mode(regs)) {
		debugger(regs);
		die("Exception in kernel mode", regs, signr);
	}
	info.si_signo = signr;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);

	/*
	 * Init gets no signals that it doesn't have a handler for.
	 * That's all very well, but if it has caused a synchronous
	 * exception and we ignore the resulting signal, it will just
	 * generate the same exception over and over again and we get
	 * nowhere. Better to kill it and let the kernel panic.
	 */
	if (is_global_init(current)) {
		__sighandler_t handler;

		spin_lock_irq(&current->sighand->siglock);
		handler = current->sighand->action[signr-1].sa.sa_handler;
		spin_unlock_irq(&current->sighand->siglock);
		if (handler == SIG_DFL) {
			/* init has generated a synchronous exception
			   and it doesn't have a handler for the signal */
			printk(KERN_CRIT "init has generated signal %d "
			       "but has no handler for it\n", signr);
			do_exit(signr);
		}
	}
}

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#if defined CONFIG_8xx
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

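	/*
	 * SRR1 bit 0x80000 is the machine check signal and 0x40000 is TEA
	 * (see the cause decoding in machine_check_exception() below); the
	 * "(msr & 0xffff0000) == 0" test covers the 601, which sets none of
	 * the upper bits, as noted in the comment above.
	 */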
	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		/* eieio from I/O string functions */
		else if ((*nip) == 0x7c0006ac || *(nip+1) == 0x7c0006ac)
			nip += 2;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3 ||
		    (*(nip+1) >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_8xx */
	return 0;
}

#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif

/*
 * This is a "fall-back" implementation for configurations
 * which don't provide platform-specific machine check info.
 */
void __attribute__ ((weak))
platform_machine_check(struct pt_regs *regs)
{
}

void machine_check_exception(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (user_mode(regs)) {
		regs->msr |= MSR_RI;
		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
		return;
	}

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort */
	bad_page_fault(regs, regs->dar, SIGBUS);
	return;
#endif

	if (debugger_fault_handler) {
		debugger_fault_handler(regs);
		regs->msr |= MSR_RI;
		return;
	}

	if (check_io_access(regs))
		return;

#if defined(CONFIG_4xx) && !defined(CONFIG_440A)
	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");
#elif defined(CONFIG_440A)
	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP){
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	}
	else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP){
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
#elif defined (CONFIG_E500)
	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_GL_CI)
		printk("Guarded Load or Cache-Inhibited stwcx.\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");
#elif defined (CONFIG_E200)
	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		printk("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");
#else /* !CONFIG_4xx && !CONFIG_E500 && !CONFIG_E200 */
	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
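	/* 0x601F0000 selects the SRR1 bits that report the machine check
	 * cause on the classic 6xx/7xx/74xx processors handled here; any
	 * other pattern falls through to the default case below. */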
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
#endif /* CONFIG_4xx */

	/*
	 * Optional platform-provided routine to print out
	 * additional info, e.g. bus error registers.
	 */
	platform_machine_check(regs);

	debugger(regs);
	die("machine check", regs, SIGBUS);
}

void SMIException(struct pt_regs *regs)
{
	debugger(regs);
#if !(defined(CONFIG_XMON) || defined(CONFIG_KGDB))
	show_regs(regs);
	panic("System Management Interrupt");
#endif
}

void unknown_exception(struct pt_regs *regs)
{
	printk("Bad trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
	_exception(SIGTRAP, regs, 0, 0);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	if (debugger_iabr_match(regs))
		return;
	_exception(SIGTRAP, regs, TRAP_BRKPT, 0);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

/* Illegal instruction emulation support. Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault. Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits. In this case, matching lots of
 * bits is faster and easier.
 *
 */
#define INST_MFSPR_PVR		0x7c1f42a6
#define INST_MFSPR_PVR_MASK	0xfc1fffff
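/* The PVR mask leaves the rD field (bits 21-25) as a don't-care, so any
 * "mfspr rD, PVR" encoding matches regardless of the destination register. */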

#define INST_DCBA		0x7c0005ec
#define INST_DCBA_MASK		0x7c0007fe

#define INST_MCRXR		0x7c000400
#define INST_MCRXR_MASK		0x7c0007fe

#define INST_STRING		0x7c00042a
#define INST_STRING_MASK	0x7c0007fe
#define INST_STRING_GEN_MASK	0x7c00067e
#define INST_LSWI		0x7c0004aa
#define INST_LSWX		0x7c00042a
#define INST_STSWI		0x7c0005aa
#define INST_STSWX		0x7c00052a

static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
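	/* rT, rA and NB_RB above are the instruction's RT, RA and NB/RB
	 * fields; NB_RB is a byte count for lswi/stswi and a register
	 * number for lswx/stswx. */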
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & INST_STRING_MASK) == INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & INST_STRING_MASK) {
	case INST_LSWX:
	case INST_STSWX:
		EA += regs->gpr[NB_RB];	/* EA is (rA|0) plus the contents of rB */
		num_bytes = regs->xer & 0x7f;
		break;
	case INST_LSWI:
	case INST_STSWI:
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));
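		/* The shift packs string data into the register high byte
		 * first: pos 0 maps to shift 24 (the most significant byte
		 * of gpr[rT]) and pos 3 to shift 0 (the least). */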

		switch ((instword & INST_STRING_MASK)) {
		case INST_LSWX:
		case INST_LSWI:
			if (get_user(val, (u8 __user *)EA))
				return -EFAULT;
			/* first time updating this reg,
			 * zero it out */
			if (pos == 0)
				regs->gpr[rT] = 0;
			regs->gpr[rT] |= val << shift;
			break;
		case INST_STSWI:
		case INST_STSWX:
			val = regs->gpr[rT] >> shift;
			if (put_user(val, (u8 __user *)EA))
				return -EFAULT;
			break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR.
	 */
	if ((instword & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR) {
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op. */
	if ((instword & INST_DCBA_MASK) == INST_DCBA)
		return 0;

	/* Emulate the mcrxr insn. */
	if ((instword & INST_MCRXR_MASK) == INST_MCRXR) {
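		/* (instword >> 21) & 0x1c is the crfD field scaled to a bit
		 * offset: mcrxr copies the top four XER bits (SO, OV, CA and
		 * a reserved bit) into CR field crfD and then clears them in
		 * XER, which is what the code below does. */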
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & INST_STRING_GEN_MASK) == INST_STRING)
		return emulate_string_inst(regs, instword);

	return -EINVAL;
}

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception. This was pointed out
 * by Kumar Gala. -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs)) {
		clear_single_step(regs);
		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
	}
}

int is_valid_bugaddr(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

void program_check_exception(struct pt_regs *regs)
{
	unsigned int reason = get_reason(regs);
	extern int do_mathemu(struct pt_regs *regs);

#ifdef CONFIG_MATH_EMULATION
	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0. In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
	if (!(reason & REASON_TRAP) && do_mathemu(regs) == 0) {
		emulate_single_step(regs);
		return;
	}
#endif /* CONFIG_MATH_EMULATION */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		int code = 0;
		u32 fpscr;

		/* We must make sure the FP state is consistent with
		 * our MSR_FP in regs
		 */
		preempt_disable();
		if (regs->msr & MSR_FP)
			giveup_fpu(current);
		preempt_enable();

		fpscr = current->thread.fpscr.val;
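		/* The FPSCR exception bits (VX, OX, UX, ZX, XX) sit 22 bits
		 * above their enable bits (VE, OE, UE, ZE, XE), so ANDing
		 * with the enables shifted left by 22 keeps only the
		 * exceptions this task has asked to trap on. */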
		fpscr &= fpscr << 22;	/* mask summary bits with enables */
		if (fpscr & FPSCR_VX)
			code = FPE_FLTINV;
		else if (fpscr & FPSCR_OX)
			code = FPE_FLTOVF;
		else if (fpscr & FPSCR_UX)
			code = FPE_FLTUND;
		else if (fpscr & FPSCR_ZX)
			code = FPE_FLTDIV;
		else if (fpscr & FPSCR_XX)
			code = FPE_FLTRES;
		_exception(SIGFPE, regs, code, regs->nip);
		return;
	}

	if (reason & REASON_TRAP) {
		/* trap exception */
		if (debugger_bpt(regs))
			return;

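		/* report_bug() recognizes the conditional trap used by
		 * WARN_ON() in kernel code; step over the trap instruction
		 * and continue instead of raising SIGTRAP. */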
		if (!(regs->msr & MSR_PR) && /* not user-mode */
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			return;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, 0);
		return;
	}

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			return;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			return;
		}
	}

	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}

void single_step_exception(struct pt_regs *regs)
{
	regs->msr &= ~(MSR_SE | MSR_BE);	/* Turn off 'trace' bits */
	if (debugger_sstep(regs))
		return;
	_exception(SIGTRAP, regs, TRAP_TRACE, 0);
}

void alignment_exception(struct pt_regs *regs)
{
	int sig, code, fixed = 0;

	fixed = fix_alignment(regs);
	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		return;
	}
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

void trace_syscall(struct pt_regs *regs)
{
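	/* ccr & 0x10000000 is CR0[SO], which the syscall exit path sets
	 * when gpr[3] holds an error code rather than a normal result. */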
	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
	       current, current->pid, regs->nip, regs->link, regs->gpr[0],
	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
	extern int do_mathemu(struct pt_regs *);
	extern int Soft_emulate_8xx(struct pt_regs *);
	int errcode;

	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

#ifdef CONFIG_MATH_EMULATION
	errcode = do_mathemu(regs);
#else
	errcode = Soft_emulate_8xx(regs);
#endif
	if (errcode) {
		if (errcode > 0)
			_exception(SIGFPE, regs, 0, 0);
		else if (errcode == -EFAULT)
			_exception(SIGSEGV, regs, 0, 0);
		else
			_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
	} else
		emulate_single_step(regs);
}
#endif /* CONFIG_8xx */

#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)

void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	if (debug_status & DBSR_IC) {	/* instruction completion */
		regs->msr &= ~MSR_DE;
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_IC;
		} else {
			/* Disable instruction completion */
			mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
			/* Clear the instruction completion event */
			mtspr(SPRN_DBSR, DBSR_IC);
			if (debugger_sstep(regs))
				return;
		}
		_exception(SIGTRAP, regs, TRAP_TRACE, 0);
	}
}
#endif /* CONFIG_4xx || CONFIG_BOOKE */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

/*
 * FP unavailable trap from kernel - print a message, but let
 * the task use FP in the kernel until it returns to user mode.
 */
void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	regs->msr |= MSR_FP;
	printk(KERN_ERR "floating point used in kernel (task=%p, pc=%lx)\n",
	       current, regs->nip);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	static int kernel_altivec_count;

#ifndef CONFIG_ALTIVEC
	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}
#endif
	/* The kernel has executed an altivec instruction without
	   first enabling altivec. Whinge but let it do it. */
	if (++kernel_altivec_count < 10)
		printk(KERN_ERR "AltiVec used in kernel (task=%p, pc=%lx)\n",
		       current, regs->nip);
	regs->msr |= MSR_VEC;
}

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	preempt_disable();
	if (regs->msr & MSR_VEC)
		giveup_altivec(current);
	preempt_enable();
	if (!user_mode(regs)) {
		printk(KERN_ERR "altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		debugger(regs);
		die("altivec assist exception", regs, SIGFPE);
		return;
	}

	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;	/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk(KERN_ERR "unrecognized altivec instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_E500
void performance_monitor_exception(struct pt_regs *regs)
{
	perf_irq(regs);
}
#endif

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as privileged ops; in the future we could try to do
	 * something smarter.
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	/* Hardware does not necessarily set sticky
	 * underflow/overflow/invalid flags */
	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
		spefscr |= SPEFSCR_FOVFS;
	}
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
		spefscr |= SPEFSCR_FUNFS;
	}
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
		spefscr |= SPEFSCR_FINVS;
	}
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	current->thread.spefscr = spefscr;

	_exception(SIGFPE, regs, code, regs->nip);
	return;
}
#endif

#ifdef CONFIG_BOOKE_WDT
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

void __init trap_init(void)
{
}