1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/config.h>
23#include <linux/errno.h>
24#include <linux/sys.h>
25#include <linux/threads.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/cputable.h>
30#include <asm/thread_info.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/unistd.h>
34
35#undef SHOW_SYSCALLS
36#undef SHOW_SYSCALLS_TASK
37
38/*
 39 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
40 */
41#if MSR_KERNEL >= 0x10000
42#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
43#else
44#define LOAD_MSR_KERNEL(r, x) li r,(x)
45#endif
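/*
 * For instance, LOAD_MSR_KERNEL(r10,MSR_KERNEL) becomes
 *	lis	r10,MSR_KERNEL@h; ori	r10,r10,MSR_KERNEL@l
 * on 4xx/Book-E, and just
 *	li	r10,MSR_KERNEL
 * elsewhere, where the value fits in a signed 16-bit immediate.
 */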
46
47#ifdef CONFIG_BOOKE
48#include "head_booke.h"
49#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level) \
50 mtspr exc_level##_SPRG,r8; \
51 BOOKE_LOAD_EXC_LEVEL_STACK(exc_level); \
52 lwz r0,GPR10-INT_FRAME_SIZE(r8); \
53 stw r0,GPR10(r11); \
54 lwz r0,GPR11-INT_FRAME_SIZE(r8); \
55 stw r0,GPR11(r11); \
56 mfspr r8,exc_level##_SPRG
57
58 .globl mcheck_transfer_to_handler
59mcheck_transfer_to_handler:
60 TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
61 b transfer_to_handler_full
62
63 .globl debug_transfer_to_handler
64debug_transfer_to_handler:
65 TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
66 b transfer_to_handler_full
67
68 .globl crit_transfer_to_handler
69crit_transfer_to_handler:
70 TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
71 /* fall through */
72#endif
73
74#ifdef CONFIG_40x
75 .globl crit_transfer_to_handler
76crit_transfer_to_handler:
77 lwz r0,crit_r10@l(0)
78 stw r0,GPR10(r11)
79 lwz r0,crit_r11@l(0)
80 stw r0,GPR11(r11)
81 /* fall through */
82#endif
83
84/*
85 * This code finishes saving the registers to the exception frame
86 * and jumps to the appropriate handler for the exception, turning
87 * on address translation.
88 * Note that we rely on the caller having set cr0.eq iff the exception
89 * occurred in kernel mode (i.e. MSR:PR = 0).
90 */
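/*
 * The calling convention, visible in the mflr/lwz pair near the end of
 * transfer_to_handler_cont: the exception prolog branches here with a
 * "bl" and places two words immediately after the branch -- the virtual
 * address of the handler, and the address to continue at once the
 * handler is done.
 */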
91 .globl transfer_to_handler_full
92transfer_to_handler_full:
93 SAVE_NVGPRS(r11)
94 /* fall through */
95
96 .globl transfer_to_handler
97transfer_to_handler:
98 stw r2,GPR2(r11)
99 stw r12,_NIP(r11)
100 stw r9,_MSR(r11)
101 andi. r2,r9,MSR_PR
102 mfctr r12
103 mfspr r2,SPRN_XER
104 stw r12,_CTR(r11)
105 stw r2,_XER(r11)
106 mfspr r12,SPRN_SPRG3
107 addi r2,r12,-THREAD
108 tovirt(r2,r2) /* set r2 to current */
109 beq 2f /* if from user, fix up THREAD.regs */
110 addi r11,r1,STACK_FRAME_OVERHEAD
111 stw r11,PT_REGS(r12)
112#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
113 /* Check to see if the dbcr0 register is set up to debug. Use the
114 single-step bit to do this. */
115 lwz r12,THREAD_DBCR0(r12)
116 andis. r12,r12,DBCR0_IC@h
117 beq+ 3f
118 /* From user and task is ptraced - load up global dbcr0 */
119 li r12,-1 /* clear all pending debug events */
120 mtspr SPRN_DBSR,r12
121 lis r11,global_dbcr0@ha
122 tophys(r11,r11)
123 addi r11,r11,global_dbcr0@l
124 lwz r12,0(r11)
125 mtspr SPRN_DBCR0,r12
126 lwz r12,4(r11)
127 addi r12,r12,-1
128 stw r12,4(r11)
129#endif
130 b 3f
1312: /* if from kernel, check interrupted DOZE/NAP mode and
132 * check for stack overflow
133 */
134#ifdef CONFIG_6xx
135 mfspr r11,SPRN_HID0
136 mtcr r11
137BEGIN_FTR_SECTION
138 bt- 8,power_save_6xx_restore /* Check DOZE */
139END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
140BEGIN_FTR_SECTION
141 bt- 9,power_save_6xx_restore /* Check NAP */
142END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
143#endif /* CONFIG_6xx */
144 .globl transfer_to_handler_cont
145transfer_to_handler_cont:
146 lwz r11,THREAD_INFO-THREAD(r12)
147 cmplw r1,r11 /* if r1 <= current->thread_info */
148 ble- stack_ovf /* then the kernel stack overflowed */
1493:
150 mflr r9
151 lwz r11,0(r9) /* virtual address of handler */
152 lwz r9,4(r9) /* where to go when done */
153 FIX_SRR1(r10,r12)
154 mtspr SPRN_SRR0,r11
155 mtspr SPRN_SRR1,r10
156 mtlr r9
157 SYNC
158 RFI /* jump to handler, enable MMU */
159
160/*
161 * On kernel stack overflow, load up an initial stack pointer
162 * and call StackOverflow(regs), which should not return.
163 */
164stack_ovf:
165 /* sometimes we use a statically-allocated stack, which is OK. */
166 lis r11,_end@h
167 ori r11,r11,_end@l
168 cmplw r1,r11
169 ble 3b /* r1 <= &_end is OK */
170 SAVE_NVGPRS(r11)
171 addi r3,r1,STACK_FRAME_OVERHEAD
172 lis r1,init_thread_union@ha
173 addi r1,r1,init_thread_union@l
174 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
175 lis r9,StackOverflow@ha
176 addi r9,r9,StackOverflow@l
177 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
178 FIX_SRR1(r10,r12)
179 mtspr SPRN_SRR0,r9
180 mtspr SPRN_SRR1,r10
181 SYNC
182 RFI
183
184/*
185 * Handle a system call.
186 */
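/*
 * On entry the syscall number is in r0 and the arguments in r3..r8,
 * with the exception frame already built by the 0xc00 vector code.
 * The result is returned in r3; an error is reported by negating the
 * value and setting the summary-overflow bit in CR0, which user
 * space's syscall stubs test for.
 */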
187 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
188 .stabs "entry_32.S",N_SO,0,0,0f
1890:
190
191_GLOBAL(DoSyscall)
192 stw r0,THREAD+LAST_SYSCALL(r2)
193 stw r3,ORIG_GPR3(r1)
194 li r12,0
195 stw r12,RESULT(r1)
196 lwz r11,_CCR(r1) /* Clear SO bit in CR */
197 rlwinm r11,r11,0,4,2
198 stw r11,_CCR(r1)
199#ifdef SHOW_SYSCALLS
200 bl do_show_syscall
201#endif /* SHOW_SYSCALLS */
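	/* The kernel stack is THREAD_SIZE aligned with the thread_info at
	 * its base, so masking the low THREAD_SHIFT bits off r1 (below) is
	 * all that current_thread_info() amounts to here.
	 */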
 202	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
203 lwz r11,TI_FLAGS(r10)
204 andi. r11,r11,_TIF_SYSCALL_T_OR_A
205 bne- syscall_dotrace
206syscall_dotrace_cont:
207 cmplwi 0,r0,NR_syscalls
208 lis r10,sys_call_table@h
209 ori r10,r10,sys_call_table@l
210 slwi r0,r0,2
211 bge- 66f
212 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
213 mtlr r10
214 addi r9,r1,STACK_FRAME_OVERHEAD
215 PPC440EP_ERR42
216 blrl /* Call handler */
217 .globl ret_from_syscall
218ret_from_syscall:
219#ifdef SHOW_SYSCALLS
220 bl do_show_syscall_exit
221#endif
222 mr r6,r3
 223	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
 224	/* disable interrupts so current_thread_info()->flags can't change */
 225	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
226 SYNC
227 MTMSRD(r10)
228 lwz r9,TI_FLAGS(r12)
229 li r8,-_LAST_ERRNO
230 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL)
 231	bne-	syscall_exit_work
232 cmplw 0,r3,r8
233 blt+ syscall_exit_cont
234 lwz r11,_CCR(r1) /* Load CR */
235 neg r3,r3
236 oris r11,r11,0x1000 /* Set SO bit in CR */
237 stw r11,_CCR(r1)
238syscall_exit_cont:
239#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
240 /* If the process has its own DBCR0 value, load it up. The single
241 step bit tells us that dbcr0 should be loaded. */
242 lwz r0,THREAD+THREAD_DBCR0(r2)
243 andis. r10,r0,DBCR0_IC@h
244 bnel- load_dbcr0
245#endif
246 stwcx. r0,0,r1 /* to clear the reservation */
247 lwz r4,_LINK(r1)
248 lwz r5,_CCR(r1)
249 mtlr r4
250 mtcr r5
251 lwz r7,_NIP(r1)
252 lwz r8,_MSR(r1)
253 FIX_SRR1(r8, r0)
254 lwz r2,GPR2(r1)
255 lwz r1,GPR1(r1)
256 mtspr SPRN_SRR0,r7
257 mtspr SPRN_SRR1,r8
258 SYNC
259 RFI
260
26166: li r3,-ENOSYS
262 b ret_from_syscall
263
264 .globl ret_from_fork
265ret_from_fork:
266 REST_NVGPRS(r1)
267 bl schedule_tail
268 li r3,0
269 b ret_from_syscall
270
271/* Traced system call support */
272syscall_dotrace:
273 SAVE_NVGPRS(r1)
274 li r0,0xc00
 275	stw	r0,_TRAP(r1)
276 addi r3,r1,STACK_FRAME_OVERHEAD
277 bl do_syscall_trace_enter
278 lwz r0,GPR0(r1) /* Restore original registers */
279 lwz r3,GPR3(r1)
280 lwz r4,GPR4(r1)
281 lwz r5,GPR5(r1)
282 lwz r6,GPR6(r1)
283 lwz r7,GPR7(r1)
284 lwz r8,GPR8(r1)
285 REST_NVGPRS(r1)
286 b syscall_dotrace_cont
287
288syscall_exit_work:
289 andi. r0,r9,_TIF_RESTOREALL
290 bne- 2f
291 cmplw 0,r3,r8
292 blt+ 1f
293 andi. r0,r9,_TIF_NOERROR
294 bne- 1f
295 lwz r11,_CCR(r1) /* Load CR */
296 neg r3,r3
297 oris r11,r11,0x1000 /* Set SO bit in CR */
298 stw r11,_CCR(r1)
299
3001: stw r6,RESULT(r1) /* Save result */
 301	stw	r3,GPR3(r1)	/* Update return value */
3022: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
303 beq 4f
304
305 /* Clear per-syscall TIF flags if any are set, but _leave_
306 _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that
307 yet. */
308
309 li r11,_TIF_PERSYSCALL_MASK
310 addi r12,r12,TI_FLAGS
3113: lwarx r8,0,r12
312 andc r8,r8,r11
313#ifdef CONFIG_IBM405_ERR77
314 dcbt 0,r12
315#endif
316 stwcx. r8,0,r12
317 bne- 3b
318 subi r12,r12,TI_FLAGS
319
3204: /* Anything which requires enabling interrupts? */
321 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS)
322 beq 7f
323
324 /* Save NVGPRS if they're not saved already */
 325	lwz	r4,_TRAP(r1)
 326	andi.	r4,r4,1
 327	beq	5f
328 SAVE_NVGPRS(r1)
329 li r4,0xc00
 330	stw	r4,_TRAP(r1)
331
332 /* Re-enable interrupts */
3335: ori r10,r10,MSR_EE
334 SYNC
335 MTMSRD(r10)
336
337 andi. r0,r9,_TIF_SAVE_NVGPRS
338 bne save_user_nvgprs
339
340save_user_nvgprs_cont:
341 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
342 beq 7f
343
344 addi r3,r1,STACK_FRAME_OVERHEAD
345 bl do_syscall_trace_leave
346 REST_NVGPRS(r1)
347
3486: lwz r3,GPR3(r1)
349 LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
350 SYNC
351 MTMSRD(r10) /* disable interrupts again */
 352	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
 353	lwz	r9,TI_FLAGS(r12)
 3547:
 355	andi.	r0,r9,_TIF_NEED_RESCHED
 356	bne	8f
357 lwz r5,_MSR(r1)
358 andi. r5,r5,MSR_PR
 359	beq	ret_from_except
 360	andi.	r0,r9,_TIF_SIGPENDING
 361	beq	ret_from_except
 362	b	do_user_signal
 3638:
364 ori r10,r10,MSR_EE
365 SYNC
366 MTMSRD(r10) /* re-enable interrupts */
367 bl schedule
368 b 6b
369
370save_user_nvgprs:
 371	lwz	r8,TI_SIGFRAME(r12)
372
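/*
 * A recursive assembler macro: each step emits one
 *	stw	rN,4*N(r8)
 * relative to the sigframe pointer loaded into r8 above, together with
 * an __ex_table fixup entry pointing at save_user_nvgprs_fault, and
 * then recurses with start+1 until start reaches end.  The invocation
 * "savewords 14,31" below therefore spills r14-r31 to user space with
 * fault handling.
 */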
373.macro savewords start, end
374 1: stw \start,4*(\start)(r8)
375 .section __ex_table,"a"
376 .align 2
377 .long 1b,save_user_nvgprs_fault
378 .previous
379 .if \end - \start
380 savewords "(\start+1)",\end
381 .endif
382.endm
383 savewords 14,31
384 b save_user_nvgprs_cont
385
386
387save_user_nvgprs_fault:
388 li r3,11 /* SIGSEGV */
 389	lwz	r4,TI_TASK(r12)
 390	bl	force_sigsegv
 391
 392	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
 393	lwz	r9,TI_FLAGS(r12)
394 b save_user_nvgprs_cont
395
396#ifdef SHOW_SYSCALLS
397do_show_syscall:
398#ifdef SHOW_SYSCALLS_TASK
399 lis r11,show_syscalls_task@ha
400 lwz r11,show_syscalls_task@l(r11)
401 cmp 0,r2,r11
402 bnelr
403#endif
404 stw r31,GPR31(r1)
405 mflr r31
406 lis r3,7f@ha
407 addi r3,r3,7f@l
408 lwz r4,GPR0(r1)
409 lwz r5,GPR3(r1)
410 lwz r6,GPR4(r1)
411 lwz r7,GPR5(r1)
412 lwz r8,GPR6(r1)
413 lwz r9,GPR7(r1)
414 bl printk
415 lis r3,77f@ha
416 addi r3,r3,77f@l
417 lwz r4,GPR8(r1)
418 mr r5,r2
419 bl printk
420 lwz r0,GPR0(r1)
421 lwz r3,GPR3(r1)
422 lwz r4,GPR4(r1)
423 lwz r5,GPR5(r1)
424 lwz r6,GPR6(r1)
425 lwz r7,GPR7(r1)
426 lwz r8,GPR8(r1)
427 mtlr r31
428 lwz r31,GPR31(r1)
429 blr
430
431do_show_syscall_exit:
432#ifdef SHOW_SYSCALLS_TASK
433 lis r11,show_syscalls_task@ha
434 lwz r11,show_syscalls_task@l(r11)
435 cmp 0,r2,r11
436 bnelr
437#endif
438 stw r31,GPR31(r1)
439 mflr r31
440 stw r3,RESULT(r1) /* Save result */
441 mr r4,r3
442 lis r3,79f@ha
443 addi r3,r3,79f@l
444 bl printk
445 lwz r3,RESULT(r1)
446 mtlr r31
447 lwz r31,GPR31(r1)
448 blr
449
4507: .string "syscall %d(%x, %x, %x, %x, %x, "
45177: .string "%x), current=%p\n"
45279: .string " -> %x\n"
453 .align 2,0
454
455#ifdef SHOW_SYSCALLS_TASK
456 .data
457 .globl show_syscalls_task
458show_syscalls_task:
459 .long -1
460 .text
461#endif
462#endif /* SHOW_SYSCALLS */
463
464/*
465 * The fork/clone functions need to copy the full register set into
466 * the child process. Therefore we need to save all the nonvolatile
467 * registers (r13 - r31) before calling the C code.
 468 */
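/*
 * The low bit of the saved trap number is used as the marker: when it
 * is set, only the volatile registers are in the frame, and clearing
 * it (as the three stubs below do after SAVE_NVGPRS) records that the
 * frame now holds the full register set, which is what the
 * "andi. rX,rY,1" tests elsewhere in this file check for.
 */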
469 .globl ppc_fork
470ppc_fork:
471 SAVE_NVGPRS(r1)
 472	lwz	r0,_TRAP(r1)
 473	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
 474	stw	r0,_TRAP(r1)	/* register set saved */
475 b sys_fork
476
477 .globl ppc_vfork
478ppc_vfork:
479 SAVE_NVGPRS(r1)
 480	lwz	r0,_TRAP(r1)
 481	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
 482	stw	r0,_TRAP(r1)	/* register set saved */
483 b sys_vfork
484
485 .globl ppc_clone
486ppc_clone:
487 SAVE_NVGPRS(r1)
 488	lwz	r0,_TRAP(r1)
 489	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
 490	stw	r0,_TRAP(r1)	/* register set saved */
491 b sys_clone
492
493/*
494 * Top-level page fault handling.
495 * This is in assembler because if do_page_fault tells us that
496 * it is a bad kernel page fault, we want to save the non-volatile
497 * registers before calling bad_page_fault.
498 */
499 .globl handle_page_fault
500handle_page_fault:
501 stw r4,_DAR(r1)
502 addi r3,r1,STACK_FRAME_OVERHEAD
503 bl do_page_fault
504 cmpwi r3,0
505 beq+ ret_from_except
506 SAVE_NVGPRS(r1)
 507	lwz	r0,_TRAP(r1)
 508	clrrwi	r0,r0,1
 509	stw	r0,_TRAP(r1)
510 mr r5,r3
511 addi r3,r1,STACK_FRAME_OVERHEAD
512 lwz r4,_DAR(r1)
513 bl bad_page_fault
514 b ret_from_except_full
515
516/*
517 * This routine switches between two different tasks. The process
518 * state of one is saved on its kernel stack. Then the state
519 * of the other is restored from its kernel stack. The memory
520 * management hardware is updated to the second process's state.
521 * Finally, we can return to the second process.
522 * On entry, r3 points to the THREAD for the current task, r4
523 * points to the THREAD for the new task.
524 *
525 * This routine is always called with interrupts disabled.
526 *
527 * Note: there are two ways to get to the "going out" portion
528 * of this code; either by coming in via the entry (_switch)
529 * or via "fork" which must set up an environment equivalent
530 * to the "_switch" path. If you change this , you'll have to
531 * change the fork code also.
532 *
533 * The code which creates the new task context is in 'copy_thread'
534 * in arch/ppc/kernel/process.c
535 */
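/*
 * Viewed from C (presumably via the switch_to() path in process.c),
 * this is roughly
 *	last = _switch(&prev->thread, &next->thread);
 * r3 and r4 carry the two thread_struct pointers in, and the old
 * "current" task_struct pointer comes back in r3 as the return value.
 */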
536_GLOBAL(_switch)
537 stwu r1,-INT_FRAME_SIZE(r1)
538 mflr r0
539 stw r0,INT_FRAME_SIZE+4(r1)
540 /* r3-r12 are caller saved -- Cort */
541 SAVE_NVGPRS(r1)
542 stw r0,_NIP(r1) /* Return to switch caller */
543 mfmsr r11
544 li r0,MSR_FP /* Disable floating-point */
545#ifdef CONFIG_ALTIVEC
546BEGIN_FTR_SECTION
547 oris r0,r0,MSR_VEC@h /* Disable altivec */
548 mfspr r12,SPRN_VRSAVE /* save vrsave register value */
549 stw r12,THREAD+THREAD_VRSAVE(r2)
550END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
551#endif /* CONFIG_ALTIVEC */
552#ifdef CONFIG_SPE
553 oris r0,r0,MSR_SPE@h /* Disable SPE */
554 mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
555 stw r12,THREAD+THREAD_SPEFSCR(r2)
556#endif /* CONFIG_SPE */
557 and. r0,r0,r11 /* FP or altivec or SPE enabled? */
558 beq+ 1f
559 andc r11,r11,r0
560 MTMSRD(r11)
561 isync
5621: stw r11,_MSR(r1)
563 mfcr r10
564 stw r10,_CCR(r1)
565 stw r1,KSP(r3) /* Set old stack pointer */
566
567#ifdef CONFIG_SMP
568 /* We need a sync somewhere here to make sure that if the
569 * previous task gets rescheduled on another CPU, it sees all
570 * stores it has performed on this one.
571 */
572 sync
573#endif /* CONFIG_SMP */
574
575 tophys(r0,r4)
576 CLR_TOP32(r0)
577 mtspr SPRN_SPRG3,r0 /* Update current THREAD phys addr */
578 lwz r1,KSP(r4) /* Load new stack pointer */
579
580 /* save the old current 'last' for return value */
581 mr r3,r2
582 addi r2,r4,-THREAD /* Update current */
583
584#ifdef CONFIG_ALTIVEC
585BEGIN_FTR_SECTION
586 lwz r0,THREAD+THREAD_VRSAVE(r2)
587 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
588END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
589#endif /* CONFIG_ALTIVEC */
590#ifdef CONFIG_SPE
591 lwz r0,THREAD+THREAD_SPEFSCR(r2)
592 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
593#endif /* CONFIG_SPE */
594
595 lwz r0,_CCR(r1)
596 mtcrf 0xFF,r0
597 /* r3-r12 are destroyed -- Cort */
598 REST_NVGPRS(r1)
599
600 lwz r4,_NIP(r1) /* Return to _switch caller in new task */
601 mtlr r4
602 addi r1,r1,INT_FRAME_SIZE
603 blr
604
605 .globl fast_exception_return
606fast_exception_return:
607#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
608 andi. r10,r9,MSR_RI /* check for recoverable interrupt */
609 beq 1f /* if not, we've got problems */
610#endif
611
6122: REST_4GPRS(3, r11)
613 lwz r10,_CCR(r11)
614 REST_GPR(1, r11)
615 mtcr r10
616 lwz r10,_LINK(r11)
617 mtlr r10
618 REST_GPR(10, r11)
619 mtspr SPRN_SRR1,r9
620 mtspr SPRN_SRR0,r12
621 REST_GPR(9, r11)
622 REST_GPR(12, r11)
623 lwz r11,GPR11(r11)
624 SYNC
625 RFI
626
627#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
628/* check if the exception happened in a restartable section */
6291: lis r3,exc_exit_restart_end@ha
630 addi r3,r3,exc_exit_restart_end@l
631 cmplw r12,r3
632 bge 3f
633 lis r4,exc_exit_restart@ha
634 addi r4,r4,exc_exit_restart@l
635 cmplw r12,r4
636 blt 3f
637 lis r3,fee_restarts@ha
638 tophys(r3,r3)
639 lwz r5,fee_restarts@l(r3)
640 addi r5,r5,1
641 stw r5,fee_restarts@l(r3)
642 mr r12,r4 /* restart at exc_exit_restart */
643 b 2b
644
645 .comm fee_restarts,4
646
647/* aargh, a nonrecoverable interrupt, panic */
648/* aargh, we don't know which trap this is */
649/* but the 601 doesn't implement the RI bit, so assume it's OK */
6503:
651BEGIN_FTR_SECTION
652 b 2b
653END_FTR_SECTION_IFSET(CPU_FTR_601)
654 li r10,-1
 655	stw	r10,_TRAP(r11)
656 addi r3,r1,STACK_FRAME_OVERHEAD
657 lis r10,MSR_KERNEL@h
658 ori r10,r10,MSR_KERNEL@l
659 bl transfer_to_handler_full
660 .long nonrecoverable_exception
661 .long ret_from_except
662#endif
663
664 .globl ret_from_except_full
665ret_from_except_full:
666 REST_NVGPRS(r1)
667 /* fall through */
668
669 .globl ret_from_except
670ret_from_except:
671 /* Hard-disable interrupts so that current_thread_info()->flags
672 * can't change between when we test it and when we return
673 * from the interrupt. */
674 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
675 SYNC /* Some chip revs have problems here... */
676 MTMSRD(r10) /* disable interrupts */
677
678 lwz r3,_MSR(r1) /* Returning to user mode? */
679 andi. r0,r3,MSR_PR
680 beq resume_kernel
681
682user_exc_return: /* r10 contains MSR_KERNEL here */
683 /* Check current_thread_info()->flags */
 684	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
 685	lwz	r9,TI_FLAGS(r9)
 686	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL)
687 bne do_work
688
689restore_user:
690#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
691 /* Check whether this process has its own DBCR0 value. The single
692 step bit tells us that dbcr0 should be loaded. */
693 lwz r0,THREAD+THREAD_DBCR0(r2)
694 andis. r10,r0,DBCR0_IC@h
695 bnel- load_dbcr0
696#endif
697
698#ifdef CONFIG_PREEMPT
699 b restore
700
701/* N.B. the only way to get here is from the beq following ret_from_except. */
702resume_kernel:
703 /* check current_thread_info->preempt_count */
 704	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
705 lwz r0,TI_PREEMPT(r9)
706 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
707 bne restore
708 lwz r0,TI_FLAGS(r9)
709 andi. r0,r0,_TIF_NEED_RESCHED
710 beq+ restore
711 andi. r0,r3,MSR_EE /* interrupts off? */
712 beq restore /* don't schedule if so */
7131: bl preempt_schedule_irq
 714	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
715 lwz r3,TI_FLAGS(r9)
716 andi. r0,r3,_TIF_NEED_RESCHED
717 bne- 1b
718#else
719resume_kernel:
720#endif /* CONFIG_PREEMPT */
721
722 /* interrupts are hard-disabled at this point */
723restore:
724 lwz r0,GPR0(r1)
725 lwz r2,GPR2(r1)
726 REST_4GPRS(3, r1)
727 REST_2GPRS(7, r1)
728
729 lwz r10,_XER(r1)
730 lwz r11,_CTR(r1)
731 mtspr SPRN_XER,r10
732 mtctr r11
733
734 PPC405_ERR77(0,r1)
735 stwcx. r0,0,r1 /* to clear the reservation */
736
737#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
738 lwz r9,_MSR(r1)
739 andi. r10,r9,MSR_RI /* check if this exception occurred */
740 beql nonrecoverable /* at a bad place (MSR:RI = 0) */
741
742 lwz r10,_CCR(r1)
743 lwz r11,_LINK(r1)
744 mtcrf 0xFF,r10
745 mtlr r11
746
747 /*
748 * Once we put values in SRR0 and SRR1, we are in a state
749 * where exceptions are not recoverable, since taking an
750 * exception will trash SRR0 and SRR1. Therefore we clear the
751 * MSR:RI bit to indicate this. If we do take an exception,
752 * we can't return to the point of the exception but we
753 * can restart the exception exit path at the label
754 * exc_exit_restart below. -- paulus
755 */
756 LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
757 SYNC
758 MTMSRD(r10) /* clear the RI bit */
759 .globl exc_exit_restart
760exc_exit_restart:
761 lwz r9,_MSR(r1)
762 lwz r12,_NIP(r1)
763 FIX_SRR1(r9,r10)
764 mtspr SPRN_SRR0,r12
765 mtspr SPRN_SRR1,r9
766 REST_4GPRS(9, r1)
767 lwz r1,GPR1(r1)
768 .globl exc_exit_restart_end
769exc_exit_restart_end:
770 SYNC
771 RFI
772
773#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
774 /*
775 * This is a bit different on 4xx/Book-E because it doesn't have
776 * the RI bit in the MSR.
777 * The TLB miss handler checks if we have interrupted
778 * the exception exit path and restarts it if so
779 * (well maybe one day it will... :).
780 */
781 lwz r11,_LINK(r1)
782 mtlr r11
783 lwz r10,_CCR(r1)
784 mtcrf 0xff,r10
785 REST_2GPRS(9, r1)
786 .globl exc_exit_restart
787exc_exit_restart:
788 lwz r11,_NIP(r1)
789 lwz r12,_MSR(r1)
790exc_exit_start:
791 mtspr SPRN_SRR0,r11
792 mtspr SPRN_SRR1,r12
793 REST_2GPRS(11, r1)
794 lwz r1,GPR1(r1)
795 .globl exc_exit_restart_end
796exc_exit_restart_end:
797 PPC405_ERR77_SYNC
798 rfi
799 b . /* prevent prefetch past rfi */
800
801/*
802 * Returning from a critical interrupt in user mode doesn't need
803 * to be any different from a normal exception. For a critical
804 * interrupt in the kernel, we just return (without checking for
805 * preemption) since the interrupt may have happened at some crucial
806 * place (e.g. inside the TLB miss handler), and because we will be
807 * running with r1 pointing into critical_stack, not the current
808 * process's kernel stack (and therefore current_thread_info() will
809 * give the wrong answer).
810 * We have to restore various SPRs that may have been in use at the
811 * time of the critical interrupt.
812 *
813 */
814#ifdef CONFIG_40x
815#define PPC_40x_TURN_OFF_MSR_DR \
816 /* avoid any possible TLB misses here by turning off MSR.DR, we \
817 * assume the instructions here are mapped by a pinned TLB entry */ \
818 li r10,MSR_IR; \
819 mtmsr r10; \
820 isync; \
821 tophys(r1, r1);
822#else
823#define PPC_40x_TURN_OFF_MSR_DR
824#endif
825
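/*
 * exc_lvl_srr0/exc_lvl_srr1 name the level-specific save/restore SPRs
 * (e.g. SPRN_CSRR0/SPRN_CSRR1) and exc_lvl_rfi the matching return
 * instruction (RFCI, RFDI or RFMCI), as instantiated further down.
 */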
826#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
827 REST_NVGPRS(r1); \
828 lwz r3,_MSR(r1); \
829 andi. r3,r3,MSR_PR; \
830 LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
831 bne user_exc_return; \
832 lwz r0,GPR0(r1); \
833 lwz r2,GPR2(r1); \
834 REST_4GPRS(3, r1); \
835 REST_2GPRS(7, r1); \
836 lwz r10,_XER(r1); \
837 lwz r11,_CTR(r1); \
838 mtspr SPRN_XER,r10; \
839 mtctr r11; \
840 PPC405_ERR77(0,r1); \
841 stwcx. r0,0,r1; /* to clear the reservation */ \
842 lwz r11,_LINK(r1); \
843 mtlr r11; \
844 lwz r10,_CCR(r1); \
845 mtcrf 0xff,r10; \
846 PPC_40x_TURN_OFF_MSR_DR; \
847 lwz r9,_DEAR(r1); \
848 lwz r10,_ESR(r1); \
849 mtspr SPRN_DEAR,r9; \
850 mtspr SPRN_ESR,r10; \
851 lwz r11,_NIP(r1); \
852 lwz r12,_MSR(r1); \
853 mtspr exc_lvl_srr0,r11; \
854 mtspr exc_lvl_srr1,r12; \
855 lwz r9,GPR9(r1); \
856 lwz r12,GPR12(r1); \
857 lwz r10,GPR10(r1); \
858 lwz r11,GPR11(r1); \
859 lwz r1,GPR1(r1); \
860 PPC405_ERR77_SYNC; \
861 exc_lvl_rfi; \
862 b .; /* prevent prefetch past exc_lvl_rfi */
863
864 .globl ret_from_crit_exc
865ret_from_crit_exc:
866 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)
867
868#ifdef CONFIG_BOOKE
869 .globl ret_from_debug_exc
870ret_from_debug_exc:
871 RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)
872
873 .globl ret_from_mcheck_exc
874ret_from_mcheck_exc:
875 RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
876#endif /* CONFIG_BOOKE */
877
878/*
879 * Load the DBCR0 value for a task that is being ptraced,
880 * having first saved away the global DBCR0. Note that r0
881 * has the dbcr0 value to set upon entry to this.
882 */
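/*
 * global_dbcr0 (the two-word .comm below) keeps the DBCR0 value that
 * was live before the ptraced task claimed the debug registers in
 * word 0, and a usage count in word 1; the count is incremented here
 * and decremented again on the transfer_to_handler path.
 */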
883load_dbcr0:
884 mfmsr r10 /* first disable debug exceptions */
885 rlwinm r10,r10,0,~MSR_DE
886 mtmsr r10
887 isync
888 mfspr r10,SPRN_DBCR0
889 lis r11,global_dbcr0@ha
890 addi r11,r11,global_dbcr0@l
891 stw r10,0(r11)
892 mtspr SPRN_DBCR0,r0
893 lwz r10,4(r11)
894 addi r10,r10,1
895 stw r10,4(r11)
896 li r11,-1
897 mtspr SPRN_DBSR,r11 /* clear all pending debug events */
898 blr
899
900 .comm global_dbcr0,8
901#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
902
903do_work: /* r10 contains MSR_KERNEL here */
904 andi. r0,r9,_TIF_NEED_RESCHED
905 beq do_user_signal
906
907do_resched: /* r10 contains MSR_KERNEL here */
908 ori r10,r10,MSR_EE
909 SYNC
910 MTMSRD(r10) /* hard-enable interrupts */
911 bl schedule
912recheck:
913 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
914 SYNC
915 MTMSRD(r10) /* disable interrupts */
 916	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
917 lwz r9,TI_FLAGS(r9)
918 andi. r0,r9,_TIF_NEED_RESCHED
919 bne- do_resched
920 andi. r0,r9,_TIF_SIGPENDING
921 beq restore_user
922do_user_signal: /* r10 contains MSR_KERNEL here */
923 ori r10,r10,MSR_EE
924 SYNC
925 MTMSRD(r10) /* hard-enable interrupts */
926 /* save r13-r31 in the exception frame, if not already done */
 927	lwz	r3,_TRAP(r1)
928 andi. r0,r3,1
929 beq 2f
930 SAVE_NVGPRS(r1)
931 rlwinm r3,r3,0,0,30
 932	stw	r3,_TRAP(r1)
9332: li r3,0
934 addi r4,r1,STACK_FRAME_OVERHEAD
935 bl do_signal
936 REST_NVGPRS(r1)
937 b recheck
938
939/*
940 * We come here when we are at the end of handling an exception
941 * that occurred at a place where taking an exception will lose
942 * state information, such as the contents of SRR0 and SRR1.
943 */
944nonrecoverable:
945 lis r10,exc_exit_restart_end@ha
946 addi r10,r10,exc_exit_restart_end@l
947 cmplw r12,r10
948 bge 3f
949 lis r11,exc_exit_restart@ha
950 addi r11,r11,exc_exit_restart@l
951 cmplw r12,r11
952 blt 3f
953 lis r10,ee_restarts@ha
954 lwz r12,ee_restarts@l(r10)
955 addi r12,r12,1
956 stw r12,ee_restarts@l(r10)
957 mr r12,r11 /* restart at exc_exit_restart */
958 blr
9593: /* OK, we can't recover, kill this process */
960 /* but the 601 doesn't implement the RI bit, so assume it's OK */
961BEGIN_FTR_SECTION
962 blr
963END_FTR_SECTION_IFSET(CPU_FTR_601)
 964	lwz	r3,_TRAP(r1)
965 andi. r0,r3,1
966 beq 4f
967 SAVE_NVGPRS(r1)
968 rlwinm r3,r3,0,0,30
 969	stw	r3,_TRAP(r1)
9704: addi r3,r1,STACK_FRAME_OVERHEAD
971 bl nonrecoverable_exception
972 /* shouldn't return */
973 b 4b
974
975 .comm ee_restarts,4
976
977/*
978 * PROM code for specific machines follows. Put it
979 * here so it's easy to add arch-specific sections later.
980 * -- Cort
981 */
 982#ifdef CONFIG_PPC_RTAS
983/*
984 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
985 * called with the MMU off.
986 */
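/*
 * A sketch of the mechanics below: the return label 1f and the stack
 * pointer are converted to physical addresses, SRR0/SRR1 are loaded
 * with the RTAS entry point and an MSR value with IR/DR cleared, and
 * the RFI drops into RTAS in real mode.  RTAS returns through the link
 * register to the physical 1f, which restores the saved MSR and stack
 * and RFIs back to the caller.
 */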
987_GLOBAL(enter_rtas)
988 stwu r1,-INT_FRAME_SIZE(r1)
989 mflr r0
990 stw r0,INT_FRAME_SIZE+4(r1)
 991	LOADADDR(r4, rtas)
992 lis r6,1f@ha /* physical return address for rtas */
993 addi r6,r6,1f@l
994 tophys(r6,r6)
995 tophys(r7,r1)
996 lwz r8,RTASENTRY(r4)
997 lwz r4,RTASBASE(r4)
998 mfmsr r9
999 stw r9,8(r1)
1000 LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1001 SYNC /* disable interrupts so SRR0/1 */
1002 MTMSRD(r0) /* don't get trashed */
1003 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1004 mtlr r6
1005 mtspr SPRN_SPRG2,r7
1006 mtspr SPRN_SRR0,r8
1007 mtspr SPRN_SRR1,r9
1008 RFI
10091: tophys(r9,r1)
1010 lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
1011 lwz r9,8(r9) /* original msr value */
1012 FIX_SRR1(r9,r0)
1013 addi r1,r1,INT_FRAME_SIZE
1014 li r0,0
1015 mtspr SPRN_SPRG2,r0
1016 mtspr SPRN_SRR0,r8
1017 mtspr SPRN_SRR1,r9
1018 RFI /* return to caller */
1019
1020 .globl machine_check_in_rtas
1021machine_check_in_rtas:
1022 twi 31,0,0
1023 /* XXX load up BATs and panic */
1024
 1025#endif /* CONFIG_PPC_RTAS */