/*
 *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
 *  Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#ifdef CONFIG_PPC32
#include <asm/reg.h>
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
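/*
 * Debugger hooks.  A kernel debugger (e.g. xmon) installs these; each
 * returns non-zero when it has handled the event, in which case the
 * normal trap handling below is skipped.
 */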
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_dabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_dabr_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

int die(const char *str, struct pt_regs *regs, long err)
{
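	/* Serialize oops output across CPUs and track recursion depth on
	 * the owning CPU. */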
	static struct {
		raw_spinlock_t lock;
		u32 lock_owner;
		int lock_owner_depth;
	} die = {
		.lock =			__RAW_SPIN_LOCK_UNLOCKED(die.lock),
		.lock_owner =		-1,
		.lock_owner_depth =	0
	};
	static int die_counter;
	unsigned long flags;

	if (debugger(regs))
		return 1;

	oops_enter();

	if (die.lock_owner != raw_smp_processor_id()) {
		console_verbose();
		raw_spin_lock_irqsave(&die.lock, flags);
		die.lock_owner = smp_processor_id();
		die.lock_owner_depth = 0;
		bust_spinlocks(1);
		if (machine_is(powermac))
			pmac_backlight_unblank();
	} else {
		local_save_flags(flags);
	}

	if (++die.lock_owner_depth < 3) {
		printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
		printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
		printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
		printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_NUMA
		printk("NUMA ");
#endif
		printk("%s\n", ppc_md.name ? ppc_md.name : "");

		sysfs_printk_last_file();
		if (notify_die(DIE_OOPS, str, regs, err, 255,
			       SIGSEGV) == NOTIFY_STOP)
			return 1;

		print_modules();
		show_regs(regs);
	} else {
		printk("Recursive die() failure, output suppressed\n");
	}

	bust_spinlocks(0);
	die.lock_owner = -1;
	add_taint(TAINT_DIE);
	raw_spin_unlock_irqrestore(&die.lock, flags);

	if (kexec_should_crash(current) ||
		kexec_sr_activated(smp_processor_id()))
		crash_kexec(regs);
	crash_kexec_secondary(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(err);

	return 0;
}

void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = TRAP_TRACE;
	info->si_addr = (void __user *)regs->nip;
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;
	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %08lx nip %08lx lr %08lx code %x\n";
	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %016lx nip %016lx lr %016lx code %x\n";

	if (!user_mode(regs)) {
		if (die("Exception in kernel mode", regs, signr))
			return;
	} else if (show_unhandled_signals &&
		    unhandled_signal(current, signr) &&
		    printk_ratelimit()) {
			printk(regs->msr & MSR_SF ? fmt64 : fmt32,
				current->comm, current->pid, signr,
				addr, regs->nip, regs->link, code);
	}

	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);
}

#ifdef CONFIG_PPC64
void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			return;
	}

#ifdef CONFIG_KEXEC
	cpu_set(smp_processor_id(), cpus_in_sr);
#endif

	die("System Reset", regs, SIGABRT);

	/*
	 * Some CPUs when released from the debugger will execute this path.
	 * These CPUs entered the debugger via a soft-reset. If the CPU was
	 * hung before entering the debugger it will return to the hung
	 * state when exiting this function.  This causes a problem in
	 * kdump since the hung CPU(s) will not respond to the IPI sent
	 * from kdump. To prevent the problem we call crash_kexec_secondary()
	 * here. If a kdump had not been initiated or we exit the debugger
	 * with the "exit and recover" command (x) crash_kexec_secondary()
	 * will return after 5ms and the CPU returns to its previous state.
	 */
	crash_kexec_secondary(regs);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}
#endif

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

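	/* Either the 601 case (no SRR1 status bits set) or a TEA/MCP
	 * machine check reported in SRR1. */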
	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

/* single-step stuff */
#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif

#if defined(CONFIG_4xx)
int machine_check_4xx(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");

	return 0;
}

int machine_check_440A(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	}
	else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP) {
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
	return 0;
}

int machine_check_47x(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);
	u32 mcsr;

	printk(KERN_ERR "Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk(KERN_ERR
		       "Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
		return 0;
	}
	mcsr = mfspr(SPRN_MCSR);
	if (mcsr & MCSR_IB)
		printk(KERN_ERR "Instruction Read PLB Error\n");
	if (mcsr & MCSR_DRB)
		printk(KERN_ERR "Data Read PLB Error\n");
	if (mcsr & MCSR_DWB)
		printk(KERN_ERR "Data Write PLB Error\n");
	if (mcsr & MCSR_TLBP)
		printk(KERN_ERR "TLB Parity Error\n");
	if (mcsr & MCSR_ICP) {
		flush_instruction_cache();
		printk(KERN_ERR "I-Cache Parity Error\n");
	}
	if (mcsr & MCSR_DCSP)
		printk(KERN_ERR "D-Cache Search Parity Error\n");
	if (mcsr & PPC47x_MCSR_GPR)
		printk(KERN_ERR "GPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_FPR)
		printk(KERN_ERR "FPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_IPR)
		printk(KERN_ERR "Machine Check exception is imprecise\n");

	/* Clear MCSR */
	mtspr(SPRN_MCSR, mcsr);

	return 0;
}
#elif defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		printk("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		printk("Data Cache Parity Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_L2MMU_MHIT) {
		printk("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		printk("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		printk("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		printk("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		printk("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		printk("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		printk("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		printk("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		printk("Machine Check %s Address: %#llx\n",
		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}

int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		printk("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#else
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:		/* for 601 */
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */

void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;

	__get_cpu_var(irq_stat).mce_exceptions++;

	/* See if any machine dependent calls. In theory, we would want
	 * to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		return;

	if (user_mode(regs)) {
		regs->msr |= MSR_RI;
		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
		return;
	}

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/* the qspan pci read routines can cause machine checks -- Cort
	 *
	 * yuck !!! that totally needs to go away ! There are better ways
	 * to deal with that than having a wart in the mcheck handler.
	 * -- BenH
	 */
	bad_page_fault(regs, regs->dar, SIGBUS);
	return;
#endif

	if (debugger_fault_handler(regs)) {
		regs->msr |= MSR_RI;
		return;
	}

	if (check_io_access(regs))
		return;

	if (debugger_fault_handler(regs))
		return;
	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

void unknown_exception(struct pt_regs *regs)
{
	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_iabr_match(regs))
		return;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void __kprobes single_step_exception(struct pt_regs *regs)
{
	clear_single_step(regs);

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_sstep(regs))
		return;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
}

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}

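/*
 * Map the enabled-and-asserted FPSCR exception bits to a SIGFPE si_code,
 * checked in priority order; returns 0 if none of them apply.
 */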
static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fpscr.val);

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 *
 */
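/*
 * Emulate lswi/lswx/stswi/stswx: move num_bytes bytes between memory at
 * EA and successive GPRs starting at rT, packing four bytes per register
 * and wrapping from r31 back to r0.
 */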
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
	case PPC_INST_LSWX:
	case PPC_INST_STSWX:
		EA += NB_RB;
		num_bytes = regs->xer & 0x7f;
		break;
	case PPC_INST_LSWI:
	case PPC_INST_STSWI:
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		switch ((instword & PPC_INST_STRING_MASK)) {
		case PPC_INST_LSWX:
		case PPC_INST_LSWI:
			if (get_user(val, (u8 __user *)EA))
				return -EFAULT;
			/* first time updating this reg,
			 * zero it out */
			if (pos == 0)
				regs->gpr[rT] = 0;
			regs->gpr[rT] |= val << shift;
			break;
		case PPC_INST_STSWI:
		case PPC_INST_STSWX:
			val = regs->gpr[rT] >> shift;
			if (put_user(val, (u8 __user *)EA))
				return -EFAULT;
			break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

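/*
 * Emulate popcntb: the usual parallel bit-count, but stopping before the
 * final byte summation so that each byte of rA ends up holding the
 * population count of the corresponding byte of rS.
 */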
static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra,rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}

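/*
 * Emulate isel: rT = CR[BC] ? rA : rB, where rA == 0 means the literal
 * value zero rather than the contents of r0.
 */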
static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs) || (regs->msr & MSR_LE))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

void __kprobes program_check_exception(struct pt_regs *regs)
{
	unsigned int reason = get_reason(regs);
	extern int do_mathemu(struct pt_regs *regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		return;
	}
	if (reason & REASON_TRAP) {
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			return;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			return;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			return;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		return;
	}

	local_irq_enable();

#ifdef CONFIG_MATH_EMULATION
	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0.  In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003 */
	switch (do_mathemu(regs)) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1: {
			int code = 0;
			code = __parse_fpscr(current->thread.fpscr.val);
			_exception(SIGFPE, regs, code, regs->nip);
			return;
		}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	}
	/* fall through on any other errors */
#endif /* CONFIG_MATH_EMULATION */

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			return;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			return;
		}
	}

	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}

void alignment_exception(struct pt_regs *regs)
{
	int sig, code, fixed = 0;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		return;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

void trace_syscall(struct pt_regs *regs)
{
	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
	       current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
}

void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a VSX instruction,
		   but this kernel doesn't support VSX. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

void performance_monitor_exception(struct pt_regs *regs)
{
	__get_cpu_var(irq_stat).pmu_irqs++;

	perf_irq(regs);
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
	extern int do_mathemu(struct pt_regs *);
	extern int Soft_emulate_8xx(struct pt_regs *);
#if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU)
	int errcode;
#endif

	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

#ifdef CONFIG_MATH_EMULATION
	errcode = do_mathemu(regs);
	if (errcode >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (errcode) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1: {
			int code = 0;
			code = __parse_fpscr(current->thread.fpscr.val);
			_exception(SIGFPE, regs, code, regs->nip);
			return;
		}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	default:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
	errcode = Soft_emulate_8xx(regs);
	if (errcode >= 0)
		PPC_WARN_EMULATED(8xx, regs);

	switch (errcode) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	}
#else
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
#endif
}
#endif /* CONFIG_8xx */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler. Torez
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
			     5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
			     6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
			     1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
			     2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
			     3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.dbcr0);
}

void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_BT;
			current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) {	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_IC;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
					       current->thread.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.dbcr0 &= ~DBCR0_IDM;
#endif
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		if (printk_ratelimit())
			printk(KERN_ERR "Unrecognized altivec instruction "
			       "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
void vsx_assist_exception(struct pt_regs *regs)
{
	if (!user_mode(regs)) {
		printk(KERN_EMERG "VSX assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VSX assist exception", regs, SIGILL);
	}

	flush_vsx_to_thread(current);
	printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
	}
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
	}
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
	}
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

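	/* nip points past the instruction that raised the round exception;
	 * back it up so speround_handler() can examine it. */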
	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
		return;
	}
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#ifdef CONFIG_BOOKE_WDT
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}


#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
	WARN_EMULATED_SETUP(8xx),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	if (printk_ratelimit())
		pr_warning("%s used emulated %s instruction\n", current->comm,
			   type);
}

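/*
 * Expose the emulation counters via debugfs: an "emulated_instructions"
 * directory with one u32 file per instruction type, plus a "do_warn"
 * switch that enables the rate-limited warning above.
 */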
static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */