/*
 * S390 version
 *   Copyright IBM Corp. 1999, 2000
 *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *              Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 * Derived from "arch/i386/kernel/traps.c"
 *   Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include <asm/ipl.h>
#include "entry.h"

void (*pgm_check_table[128])(struct pt_regs *regs);

int show_unhandled_signals = 1;

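/* Fetch the current stack pointer: register 15 is the stack pointer in the s390 ABI. */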
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack, which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

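	/*
	 * Walk the frames on this stack area; gprs[8] of a stack frame
	 * holds the saved return address (r14).
	 */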
	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}

static void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long * __r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
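		/* Stop once we reach the end of the THREAD_SIZE-aligned stack area. */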
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if ((i * sizeof(long) % 32) == 0)
			printk("%s ", i == 0 ? "" : "\n");
		printk(LONG, *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}

static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
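	/* The breaking-event address is saved into pt_regs->args[0] by the low-level entry code. */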
156 printk("Last Breaking-Event-Address:\n");
157 printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
158 print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
159 #endif
160 }
161
162 /*
163 * The architecture-independent dump_stack generator
164 */
165 void dump_stack(void)
166 {
167 printk("CPU: %d %s %s %.*s\n",
168 task_thread_info(current)->cpu, print_tainted(),
169 init_utsname()->release,
170 (int)strcspn(init_utsname()->version, " "),
171 init_utsname()->version);
172 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
173 current->comm, current->pid, current,
174 (void *) current->thread.ksp);
175 show_stack(NULL, NULL);
176 }
177 EXPORT_SYMBOL(dump_stack);
178
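/*
 * Extract the PSW field selected by @bits and right-justify it:
 * (~bits + 1) & bits isolates the lowest set bit of the mask.
 */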
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}

void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk(" " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk(" " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk(" " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);

	show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
	print_modules();
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!(regs->psw.mask & PSW_MASK_PSTATE))
		show_trace(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);

void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	oops_enter();
	lgr_info_log();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}

static inline void report_user_fault(struct pt_regs *regs, int signr)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk("User process fault: interruption code 0x%X ", regs->int_code);
	print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
	printk("\n");
	show_regs(regs);
}

int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}

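/*
 * Back up the PSW address by the instruction length taken from the
 * interruption code to get the address of the instruction that caused
 * the program check.
 */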
static inline void __user *get_psw_address(struct pt_regs *regs)
{
	return (void __user *)
		((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
}

static void __kprobes do_trap(struct pt_regs *regs,
			      int si_signo, int si_code, char *str)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, str, regs, 0,
		       regs->int_code, si_signo) == NOTIFY_STOP)
		return;

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		info.si_signo = si_signo;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = get_psw_address(regs);
		force_sig_info(si_signo, &info, current);
		report_user_fault(regs, si_signo);
	} else {
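		/*
		 * Kernel mode: try an exception table fixup first, then a
		 * WARN/BUG table entry, and die() if neither applies.
		 */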
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				return;
			die(regs, str);
		}
	}
}

void __kprobes do_per_trap(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
		return;
	if (!current->ptrace)
		return;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr =
		(void __force __user *) current->thread.per_event.address;
	force_sig_info(SIGTRAP, &info, current);
}

static void default_trap_handler(struct pt_regs *regs)
{
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		report_user_fault(regs, SIGSEGV);
		do_exit(SIGSEGV);
	} else
		die(regs, "Unknown program exception");
}

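/* Generate trivial trap handlers that simply forward to do_trap(). */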
#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs)		\
{						\
	do_trap(regs, signr, sicode, str);	\
}

DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
	      "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
	      "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
	      "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
	      "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
	      "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
	      "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
	      "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
	      "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
	      "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
	      "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
	      "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
	      "special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
	      "translation exception")

static inline void do_fp_trap(struct pt_regs *regs, int fpc)
{
	int si_code = 0;
	/* FPC[2] is Data Exception Code */
	if ((fpc & 0x00000300) == 0) {
		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
		if (fpc & 0x8000) /* invalid fp operation */
			si_code = FPE_FLTINV;
		else if (fpc & 0x4000) /* div by 0 */
			si_code = FPE_FLTDIV;
		else if (fpc & 0x2000) /* overflow */
			si_code = FPE_FLTOVF;
		else if (fpc & 0x1000) /* underflow */
			si_code = FPE_FLTUND;
		else if (fpc & 0x0800) /* inexact */
			si_code = FPE_FLTRES;
	}
	do_trap(regs, SIGFPE, si_code, "floating point exception");
}

static void __kprobes illegal_op(struct pt_regs *regs)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs);

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
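		/* S390_BREAKPOINT_U16 is the opcode debuggers plant as a software breakpoint. */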
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (current->ptrace) {
				info.si_signo = SIGTRAP;
				info.si_errno = 0;
				info.si_code = TRAP_BRKPT;
				info.si_addr = location;
				force_sig_info(SIGTRAP, &info, current);
			} else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
		 */
		if (notify_die(DIE_BPT, "bpt", regs, 0,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}

#ifdef CONFIG_MATHEMU
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal == SIGSEGV)
		do_trap(regs, signal, SEGV_MAPERR, "user address fault");
	else
#endif
	if (signal)
		do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}


#ifdef CONFIG_MATHEMU
void specification_exception(struct pt_regs *regs)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_psw_address(regs);

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;

	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal)
		do_trap(regs, signal, ILL_ILLOPN, "specification exception");
}
#else
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
	      "specification exception");
#endif

static void data_exception(struct pt_regs *regs)
{
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs);

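	/* Save the current FPC so that the data-exception code (DXC) can be inspected below. */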
	if (MACHINE_HAS_IEEE)
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
	else if (regs->psw.mask & PSW_MASK_PSTATE) {
		__u8 opcode[6];
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal)
		do_trap(regs, signal, ILL_ILLOPN, "data exception");
}

static void space_switch_exception(struct pt_regs *regs)
{
	/* Set user psw back to home space mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}

void __kprobes kernel_stack_overflow(struct pt_regs * regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}

/* init is done in lowcore.S and head.S */

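/*
 * pgm_check_table is indexed by the program interruption code; the
 * low-level program check handler in entry.S dispatches through it.
 */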
void __init trap_init(void)
{
	int i;

	for (i = 0; i < 128; i++)
		pgm_check_table[i] = &default_trap_handler;
	pgm_check_table[1] = &illegal_op;
	pgm_check_table[2] = &privileged_op;
	pgm_check_table[3] = &execute_exception;
	pgm_check_table[4] = &do_protection_exception;
	pgm_check_table[5] = &addressing_exception;
	pgm_check_table[6] = &specification_exception;
	pgm_check_table[7] = &data_exception;
	pgm_check_table[8] = &overflow_exception;
	pgm_check_table[9] = &divide_exception;
	pgm_check_table[0x0A] = &overflow_exception;
	pgm_check_table[0x0B] = &divide_exception;
	pgm_check_table[0x0C] = &hfp_overflow_exception;
	pgm_check_table[0x0D] = &hfp_underflow_exception;
	pgm_check_table[0x0E] = &hfp_significance_exception;
	pgm_check_table[0x0F] = &hfp_divide_exception;
	pgm_check_table[0x10] = &do_dat_exception;
	pgm_check_table[0x11] = &do_dat_exception;
	pgm_check_table[0x12] = &translation_exception;
	pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
	pgm_check_table[0x38] = &do_asce_exception;
	pgm_check_table[0x39] = &do_dat_exception;
	pgm_check_table[0x3A] = &do_dat_exception;
	pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
	pgm_check_table[0x15] = &operand_exception;
	pgm_check_table[0x1C] = &space_switch_exception;
	pgm_check_table[0x1D] = &hfp_sqrt_exception;
	/* Enable machine checks early. */
	local_mcck_enable();
}