2 * Architecture-specific trap handling.
4 * Copyright (C) 1998-2003 Hewlett-Packard Co
5 * David Mosberger-Tang <davidm@hpl.hp.com>
7 * 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
10 #include <linux/config.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/sched.h>
14 #include <linux/tty.h>
15 #include <linux/vt_kern.h> /* For unblank_screen() */
16 #include <linux/module.h> /* for EXPORT_SYMBOL */
17 #include <linux/hardirq.h>
18 #include <linux/kprobes.h>
20 #include <asm/fpswa.h>
22 #include <asm/intrinsics.h>
23 #include <asm/processor.h>
24 #include <asm/uaccess.h>
25 #include <asm/kdebug.h>
27 extern spinlock_t timerlist_lock
;
29 fpswa_interface_t
*fpswa_interface
;
30 EXPORT_SYMBOL(fpswa_interface
);
32 struct notifier_block
*ia64die_chain
;
35 register_die_notifier(struct notifier_block
*nb
)
37 return notifier_chain_register(&ia64die_chain
, nb
);
39 EXPORT_SYMBOL_GPL(register_die_notifier
);
42 unregister_die_notifier(struct notifier_block
*nb
)
44 return notifier_chain_unregister(&ia64die_chain
, nb
);
46 EXPORT_SYMBOL_GPL(unregister_die_notifier
);
51 if (ia64_boot_param
->fpswa
)
52 /* FPSWA fixup: make the interface pointer a kernel virtual address: */
53 fpswa_interface
= __va(ia64_boot_param
->fpswa
);
57 * Unlock any spinlocks which will prevent us from getting the message out (timerlist_lock
58 * is acquired through the console unblank code)
61 bust_spinlocks (int yes
)
63 int loglevel_save
= console_loglevel
;
75 * OK, the message is on the console. Now we call printk() without
76 * oops_in_progress set so that printk will give klogd a poke. Hold onto
79 console_loglevel
= 15; /* NMI oopser may have shut the console up */
81 console_loglevel
= loglevel_save
;
85 die (const char *str
, struct pt_regs
*regs
, long err
)
92 .lock
= SPIN_LOCK_UNLOCKED
,
96 static int die_counter
;
99 if (die
.lock_owner
!= cpu
) {
101 spin_lock_irq(&die
.lock
);
102 die
.lock_owner
= cpu
;
103 die
.lock_owner_depth
= 0;
108 if (++die
.lock_owner_depth
< 3) {
109 printk("%s[%d]: %s %ld [%d]\n",
110 current
->comm
, current
->pid
, str
, err
, ++die_counter
);
111 (void) notify_die(DIE_OOPS
, (char *)str
, regs
, err
, 255, SIGSEGV
);
114 printk(KERN_ERR
"Recursive die() failure, output suppressed\n");
118 spin_unlock_irq(&die
.lock
);
/*
 * die() only when the fault happened in kernel mode; user-mode faults are
 * handled by signal delivery in the callers instead.
 * NOTE(review): braces and the die() call line were missing from the
 * garbled source; restored per upstream arch/ia64/kernel/traps.c.
 */
void
die_if_kernel (char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}
130 __kprobes
ia64_bad_break (unsigned long break_num
, struct pt_regs
*regs
)
135 /* break.b always sets cr.iim to 0, which causes problems for
136 * debuggers. Get the real break number from the original instruction,
137 * but only for kernel code. User space break.b is left alone, to
138 * preserve the existing behaviour. All break codings have the same
139 * format, so there is no need to check the slot type.
141 if (break_num
== 0 && !user_mode(regs
)) {
142 struct ia64_psr
*ipsr
= ia64_psr(regs
);
143 unsigned long *bundle
= (unsigned long *)regs
->cr_iip
;
146 case 0: slot
= (bundle
[0] >> 5); break;
147 case 1: slot
= (bundle
[0] >> 46) | (bundle
[1] << 18); break;
148 default: slot
= (bundle
[1] >> 23); break;
150 break_num
= ((slot
>> 36 & 1) << 20) | (slot
>> 6 & 0xfffff);
153 /* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these field initialized: */
154 siginfo
.si_addr
= (void __user
*) (regs
->cr_iip
+ ia64_psr(regs
)->ri
);
155 siginfo
.si_imm
= break_num
;
156 siginfo
.si_flags
= 0; /* clear __ISR_VALID */
160 case 0: /* unknown error (used by GCC for __builtin_abort()) */
161 if (notify_die(DIE_BREAK
, "break 0", regs
, break_num
, TRAP_BRKPT
, SIGTRAP
)
164 die_if_kernel("bugcheck!", regs
, break_num
);
165 sig
= SIGILL
; code
= ILL_ILLOPC
;
168 case 1: /* integer divide by zero */
169 sig
= SIGFPE
; code
= FPE_INTDIV
;
172 case 2: /* integer overflow */
173 sig
= SIGFPE
; code
= FPE_INTOVF
;
176 case 3: /* range check/bounds check */
177 sig
= SIGFPE
; code
= FPE_FLTSUB
;
180 case 4: /* null pointer dereference */
181 sig
= SIGSEGV
; code
= SEGV_MAPERR
;
184 case 5: /* misaligned data */
185 sig
= SIGSEGV
; code
= BUS_ADRALN
;
188 case 6: /* decimal overflow */
189 sig
= SIGFPE
; code
= __FPE_DECOVF
;
192 case 7: /* decimal divide by zero */
193 sig
= SIGFPE
; code
= __FPE_DECDIV
;
196 case 8: /* packed decimal error */
197 sig
= SIGFPE
; code
= __FPE_DECERR
;
200 case 9: /* invalid ASCII digit */
201 sig
= SIGFPE
; code
= __FPE_INVASC
;
204 case 10: /* invalid decimal digit */
205 sig
= SIGFPE
; code
= __FPE_INVDEC
;
208 case 11: /* paragraph stack overflow */
209 sig
= SIGSEGV
; code
= __SEGV_PSTKOVF
;
212 case 0x3f000 ... 0x3ffff: /* bundle-update in progress */
213 sig
= SIGILL
; code
= __ILL_BNDMOD
;
217 if (break_num
< 0x40000 || break_num
> 0x100000)
218 die_if_kernel("Bad break", regs
, break_num
);
220 if (break_num
< 0x80000) {
221 sig
= SIGILL
; code
= __ILL_BREAK
;
223 if (notify_die(DIE_BREAK
, "bad break", regs
, break_num
, TRAP_BRKPT
, SIGTRAP
)
226 sig
= SIGTRAP
; code
= TRAP_BRKPT
;
229 siginfo
.si_signo
= sig
;
230 siginfo
.si_errno
= 0;
231 siginfo
.si_code
= code
;
232 force_sig_info(sig
, &siginfo
, current
);
236 * disabled_fph_fault() is called when a user-level process attempts to access f32..f127
237 * and it doesn't own the fp-high register partition. When this happens, we save the
238 * current fph partition in the task_struct of the fpu-owner (if necessary) and then load
239 * the fp-high partition of the current task (if necessary). Note that the kernel has
240 * access to fph by the time we get here, as the IVT's "Disabled FP-Register" handler takes
241 * care of clearing psr.dfh.
244 disabled_fph_fault (struct pt_regs
*regs
)
246 struct ia64_psr
*psr
= ia64_psr(regs
);
248 /* first, grant user-level access to fph partition: */
252 * Make sure that no other task gets in on this processor
253 * while we're claiming the FPU
258 struct task_struct
*fpu_owner
259 = (struct task_struct
*)ia64_get_kr(IA64_KR_FPU_OWNER
);
261 if (ia64_is_local_fpu_owner(current
)) {
262 preempt_enable_no_resched();
267 ia64_flush_fph(fpu_owner
);
269 #endif /* !CONFIG_SMP */
270 ia64_set_local_fpu_owner(current
);
271 if ((current
->thread
.flags
& IA64_THREAD_FPH_VALID
) != 0) {
272 __ia64_load_fpu(current
->thread
.fph
);
277 * Set mfh because the state in thread.fph does not match the state in
282 preempt_enable_no_resched();
286 fp_emulate (int fp_fault
, void *bundle
, long *ipsr
, long *fpsr
, long *isr
, long *pr
, long *ifs
,
287 struct pt_regs
*regs
)
292 if (!fpswa_interface
)
295 memset(&fp_state
, 0, sizeof(fp_state_t
));
298 * compute fp_state. only FP registers f6 - f11 are used by the
299 * kernel, so set those bits in the mask and set the low volatile
300 * pointer to point to these registers.
302 fp_state
.bitmask_low64
= 0xfc0; /* bit6..bit11 */
304 fp_state
.fp_state_low_volatile
= (fp_state_low_volatile_t
*) ®s
->f6
;
306 * unsigned long (*EFI_FPSWA) (
307 * unsigned long trap_type,
309 * unsigned long *pipsr,
310 * unsigned long *pfsr,
311 * unsigned long *pisr,
312 * unsigned long *ppreds,
313 * unsigned long *pifs,
316 ret
= (*fpswa_interface
->fpswa
)((unsigned long) fp_fault
, bundle
,
317 (unsigned long *) ipsr
, (unsigned long *) fpsr
,
318 (unsigned long *) isr
, (unsigned long *) pr
,
319 (unsigned long *) ifs
, &fp_state
);
325 * Handle floating-point assist faults and traps.
328 handle_fpu_swa (int fp_fault
, struct pt_regs
*regs
, unsigned long isr
)
330 long exception
, bundle
[2];
331 unsigned long fault_ip
;
332 struct siginfo siginfo
;
333 static int fpu_swa_count
= 0;
334 static unsigned long last_time
;
336 fault_ip
= regs
->cr_iip
;
337 if (!fp_fault
&& (ia64_psr(regs
)->ri
== 0))
339 if (copy_from_user(bundle
, (void __user
*) fault_ip
, sizeof(bundle
)))
342 if (jiffies
- last_time
> 5*HZ
)
344 if ((fpu_swa_count
< 4) && !(current
->thread
.flags
& IA64_THREAD_FPEMU_NOPRINT
)) {
348 "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
349 current
->comm
, current
->pid
, regs
->cr_iip
+ ia64_psr(regs
)->ri
, isr
);
352 exception
= fp_emulate(fp_fault
, bundle
, ®s
->cr_ipsr
, ®s
->ar_fpsr
, &isr
, ®s
->pr
,
353 ®s
->cr_ifs
, regs
);
355 if (exception
== 0) {
356 /* emulation was successful */
357 ia64_increment_ip(regs
);
358 } else if (exception
== -1) {
359 printk(KERN_ERR
"handle_fpu_swa: fp_emulate() returned -1\n");
362 /* is next instruction a trap? */
364 ia64_increment_ip(regs
);
366 siginfo
.si_signo
= SIGFPE
;
367 siginfo
.si_errno
= 0;
368 siginfo
.si_code
= __SI_FAULT
; /* default code */
369 siginfo
.si_addr
= (void __user
*) (regs
->cr_iip
+ ia64_psr(regs
)->ri
);
371 siginfo
.si_code
= FPE_FLTINV
;
372 } else if (isr
& 0x22) {
373 /* denormal operand gets the same si_code as underflow
374 * see arch/i386/kernel/traps.c:math_error() */
375 siginfo
.si_code
= FPE_FLTUND
;
376 } else if (isr
& 0x44) {
377 siginfo
.si_code
= FPE_FLTDIV
;
379 siginfo
.si_isr
= isr
;
380 siginfo
.si_flags
= __ISR_VALID
;
382 force_sig_info(SIGFPE
, &siginfo
, current
);
385 if (exception
== -1) {
386 printk(KERN_ERR
"handle_fpu_swa: fp_emulate() returned -1\n");
388 } else if (exception
!= 0) {
389 /* raise exception */
390 siginfo
.si_signo
= SIGFPE
;
391 siginfo
.si_errno
= 0;
392 siginfo
.si_code
= __SI_FAULT
; /* default code */
393 siginfo
.si_addr
= (void __user
*) (regs
->cr_iip
+ ia64_psr(regs
)->ri
);
395 siginfo
.si_code
= FPE_FLTOVF
;
396 } else if (isr
& 0x1100) {
397 siginfo
.si_code
= FPE_FLTUND
;
398 } else if (isr
& 0x2200) {
399 siginfo
.si_code
= FPE_FLTRES
;
401 siginfo
.si_isr
= isr
;
402 siginfo
.si_flags
= __ISR_VALID
;
404 force_sig_info(SIGFPE
, &siginfo
, current
);
/*
 * Return bundle for the illegal-operation fault path.
 * NOTE(review): based on ia64_illegal_op_fault()/ia64_emulate_brl() usage,
 * "fkt" appears to select the fixup action for the low-level caller
 * ((unsigned long) -1 meaning "not emulated") with arg1..arg3 as its
 * arguments — verify against the assembly entry code.
 */
struct illegal_op_return {
	unsigned long fkt, arg1, arg2, arg3;
};
414 struct illegal_op_return
415 ia64_illegal_op_fault (unsigned long ec
, long arg1
, long arg2
, long arg3
,
416 long arg4
, long arg5
, long arg6
, long arg7
,
419 struct illegal_op_return rv
;
423 #ifdef CONFIG_IA64_BRL_EMU
425 extern struct illegal_op_return
ia64_emulate_brl (struct pt_regs
*, unsigned long);
427 rv
= ia64_emulate_brl(®s
, ec
);
428 if (rv
.fkt
!= (unsigned long) -1)
433 sprintf(buf
, "IA-64 Illegal operation fault");
434 die_if_kernel(buf
, ®s
, 0);
436 memset(&si
, 0, sizeof(si
));
437 si
.si_signo
= SIGILL
;
438 si
.si_code
= ILL_ILLOPC
;
439 si
.si_addr
= (void __user
*) (regs
.cr_iip
+ ia64_psr(®s
)->ri
);
440 force_sig_info(SIGILL
, &si
, current
);
446 ia64_fault (unsigned long vector
, unsigned long isr
, unsigned long ifa
,
447 unsigned long iim
, unsigned long itir
, long arg5
, long arg6
,
448 long arg7
, struct pt_regs regs
)
450 unsigned long code
, error
= isr
, iip
;
451 struct siginfo siginfo
;
454 static const char *reason
[] = {
455 "IA-64 Illegal Operation fault",
456 "IA-64 Privileged Operation fault",
457 "IA-64 Privileged Register fault",
458 "IA-64 Reserved Register/Field fault",
459 "Disabled Instruction Set Transition fault",
460 "Unknown fault 5", "Unknown fault 6", "Unknown fault 7", "Illegal Hazard fault",
461 "Unknown fault 9", "Unknown fault 10", "Unknown fault 11", "Unknown fault 12",
462 "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
465 if ((isr
& IA64_ISR_NA
) && ((isr
& IA64_ISR_CODE_MASK
) == IA64_ISR_CODE_LFETCH
)) {
467 * This fault was due to lfetch.fault, set "ed" bit in the psr to cancel
470 ia64_psr(®s
)->ed
= 1;
474 iip
= regs
.cr_iip
+ ia64_psr(®s
)->ri
;
477 case 24: /* General Exception */
478 code
= (isr
>> 4) & 0xf;
479 sprintf(buf
, "General Exception: %s%s", reason
[code
],
480 (code
== 3) ? ((isr
& (1UL << 37))
481 ? " (RSE access)" : " (data access)") : "");
483 # ifdef CONFIG_IA64_PRINT_HAZARDS
484 printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
485 current
->comm
, current
->pid
,
486 regs
.cr_iip
+ ia64_psr(®s
)->ri
, regs
.pr
);
492 case 25: /* Disabled FP-Register */
494 disabled_fph_fault(®s
);
497 sprintf(buf
, "Disabled FPL fault---not supposed to happen!");
500 case 26: /* NaT Consumption */
501 if (user_mode(®s
)) {
504 if (((isr
>> 4) & 0xf) == 2) {
505 /* NaT page consumption */
508 addr
= (void __user
*) ifa
;
510 /* register NaT consumption */
513 addr
= (void __user
*) (regs
.cr_iip
514 + ia64_psr(®s
)->ri
);
516 siginfo
.si_signo
= sig
;
517 siginfo
.si_code
= code
;
518 siginfo
.si_errno
= 0;
519 siginfo
.si_addr
= addr
;
520 siginfo
.si_imm
= vector
;
521 siginfo
.si_flags
= __ISR_VALID
;
522 siginfo
.si_isr
= isr
;
523 force_sig_info(sig
, &siginfo
, current
);
525 } else if (ia64_done_with_exception(®s
))
527 sprintf(buf
, "NaT consumption");
530 case 31: /* Unsupported Data Reference */
531 if (user_mode(®s
)) {
532 siginfo
.si_signo
= SIGILL
;
533 siginfo
.si_code
= ILL_ILLOPN
;
534 siginfo
.si_errno
= 0;
535 siginfo
.si_addr
= (void __user
*) iip
;
536 siginfo
.si_imm
= vector
;
537 siginfo
.si_flags
= __ISR_VALID
;
538 siginfo
.si_isr
= isr
;
539 force_sig_info(SIGILL
, &siginfo
, current
);
542 sprintf(buf
, "Unsupported data reference");
546 case 35: /* Taken Branch Trap */
547 case 36: /* Single Step Trap */
548 if (fsys_mode(current
, ®s
)) {
549 extern char __kernel_syscall_via_break
[];
551 * Got a trap in fsys-mode: Taken Branch Trap and Single Step trap
552 * need special handling; Debug trap is not supposed to happen.
554 if (unlikely(vector
== 29)) {
555 die("Got debug trap in fsys-mode---not supposed to happen!",
559 /* re-do the system call via break 0x100000: */
560 regs
.cr_iip
= (unsigned long) __kernel_syscall_via_break
;
561 ia64_psr(®s
)->ri
= 0;
562 ia64_psr(®s
)->cpl
= 3;
567 siginfo
.si_code
= TRAP_HWBKPT
;
568 #ifdef CONFIG_ITANIUM
570 * Erratum 10 (IFA may contain incorrect address) now has
571 * "NoFix" status. There are no plans for fixing this.
573 if (ia64_psr(®s
)->is
== 0)
577 case 35: siginfo
.si_code
= TRAP_BRANCH
; ifa
= 0; break;
578 case 36: siginfo
.si_code
= TRAP_TRACE
; ifa
= 0; break;
580 if (notify_die(DIE_FAULT
, "ia64_fault", ®s
, vector
, siginfo
.si_code
, SIGTRAP
)
583 siginfo
.si_signo
= SIGTRAP
;
584 siginfo
.si_errno
= 0;
585 siginfo
.si_addr
= (void __user
*) ifa
;
587 siginfo
.si_flags
= __ISR_VALID
;
588 siginfo
.si_isr
= isr
;
589 force_sig_info(SIGTRAP
, &siginfo
, current
);
592 case 32: /* fp fault */
593 case 33: /* fp trap */
594 result
= handle_fpu_swa((vector
== 32) ? 1 : 0, ®s
, isr
);
595 if ((result
< 0) || (current
->thread
.flags
& IA64_THREAD_FPEMU_SIGFPE
)) {
596 siginfo
.si_signo
= SIGFPE
;
597 siginfo
.si_errno
= 0;
598 siginfo
.si_code
= FPE_FLTINV
;
599 siginfo
.si_addr
= (void __user
*) iip
;
600 siginfo
.si_flags
= __ISR_VALID
;
601 siginfo
.si_isr
= isr
;
603 force_sig_info(SIGFPE
, &siginfo
, current
);
609 /* Lower-Privilege Transfer Trap */
611 * Just clear PSR.lp and then return immediately: all the
612 * interesting work (e.g., signal delivery is done in the kernel
615 ia64_psr(®s
)->lp
= 0;
618 /* Unimplemented Instr. Address Trap */
619 if (user_mode(®s
)) {
620 siginfo
.si_signo
= SIGILL
;
621 siginfo
.si_code
= ILL_BADIADDR
;
622 siginfo
.si_errno
= 0;
623 siginfo
.si_flags
= 0;
626 siginfo
.si_addr
= (void __user
*) iip
;
627 force_sig_info(SIGILL
, &siginfo
, current
);
630 sprintf(buf
, "Unimplemented Instruction Address fault");
635 #ifdef CONFIG_IA32_SUPPORT
636 if (ia32_exception(®s
, isr
) == 0)
639 printk(KERN_ERR
"Unexpected IA-32 exception (Trap 45)\n");
640 printk(KERN_ERR
" iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
642 force_sig(SIGSEGV
, current
);
646 #ifdef CONFIG_IA32_SUPPORT
647 if (ia32_intercept(®s
, isr
) == 0)
650 printk(KERN_ERR
"Unexpected IA-32 intercept trap (Trap 46)\n");
651 printk(KERN_ERR
" iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
653 force_sig(SIGSEGV
, current
);
657 sprintf(buf
, "IA-32 Interruption Fault (int 0x%lx)", isr
>> 16);
661 sprintf(buf
, "Fault %lu", vector
);
664 die_if_kernel(buf
, ®s
, error
);
665 force_sig(SIGILL
, current
);