/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
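/*
 * Illustrative expansion (the actual MSR_KERNEL value depends on the
 * configuration): li can only materialize a 16-bit sign-extended
 * immediate, so for a hypothetical MSR_KERNEL of 0x00029000,
 * LOAD_MSR_KERNEL(r10, MSR_KERNEL) would expand to
 *	lis	r10,0x2; ori r10,r10,0x9000
 * while a value that fits in 16 bits uses the single-li form.
 */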

#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* set the stack limit to the current stack and protect
	 * the thread_info struct
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* set the stack limit to the current stack and protect
	 * the thread_info struct
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
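	/*
	 * r11 now points at this CPU's slot in global_dbcr0: the array
	 * defined below reserves two words per CPU (hence the slwi by 3,
	 * i.e. cpu * 8).  From the code, word 0 holds the saved global
	 * DBCR0 value and word 1 a counter that load_dbcr0 increments
	 * and this path decrements.
	 */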
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9, r9)
	ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
#endif

	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#ifdef CONFIG_TRACE_IRQFLAGS
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
reenable_mmu:				/* re-enable mmu so we can */
	mfmsr	r10
	lwz	r12,_MSR(r1)
	xor	r10,r10,r12
	andi.	r10,r10,MSR_EE		/* Did EE change? */
	beq	1f

	/*
	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
	 * If from user mode there is only one stack frame on the stack, and
	 * accessing CALLER_ADDR1 will cause an oops.  So we need to create a
	 * dummy stack frame to make trace_hardirqs_off happy.
	 *
	 * This is handy because we also need to save a bunch of GPRs:
	 * r3 can be different from GPR3(r1) at this point, r9 and r11
	 * contain the old MSR and handler address respectively, and
	 * r4 & r5 can contain page fault arguments that need to be passed
	 * along as well.  r12, CCR, CTR, XER etc... are left clobbered as
	 * they aren't useful past this point (aren't syscall arguments);
	 * the rest is restored from the exception frame.
	 */
	stwu	r1,-32(r1)
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)
	bl	trace_hardirqs_off
	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	lwz	r0,GPR0(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
1:	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)		/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Return from syscalls can (and generally will) hard enable
	 * interrupts.  You aren't supposed to call a syscall with
	 * interrupts disabled in the first place.  However, to ensure
	 * that we get it right vs. lockdep if it happens, we force
	 * that hard enable here with appropriate tracing if we see
	 * that we have been called with interrupts off.
	 */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	bne+	1f
	/* We came in with interrupts disabled, we enable them now */
	bl	trace_hardirqs_on
	mfmsr	r11
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	ori	r11,r11,MSR_EE
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtmsr	r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
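	/*
	 * Dispatch arithmetic above: sys_call_table on PPC32 is an array
	 * of 4-byte function pointers, so "slwi r0,r0,2" scales the
	 * syscall number by 4 and lwzx loads the handler from
	 * sys_call_table + nr*4.  Numbers >= NR_syscalls fall through
	 * to 66f, which returns -ENOSYS.
	 */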
	.globl	ret_from_syscall
ret_from_syscall:
	mr	r6,r3
	CURRENT_THREAD_INFO(r12, r1)
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000			/* Set SO bit in CR */
	stw	r11,_CCR(r1)
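	/*
	 * Error-return convention: a handler signals failure by returning
	 * a value in [-MAX_ERRNO, -1].  The unsigned compare against
	 * -MAX_ERRNO catches exactly that range; we then negate r3 to a
	 * positive errno and set the SO bit in CR0, which the user-space
	 * syscall stub tests to distinguish errors from large but valid
	 * return values.
	 */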
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here.  It shouldn't happen, but we want
	 * to catch the bugger if it does.
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
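	/*
	 * The stwcx. to the stack doesn't care whether it succeeds: its
	 * only purpose is to kill any reservation left by an interrupted
	 * lwarx, so a stale reservation cannot pair with a later stwcx.
	 * after we return.  On CPUs with CPU_FTR_NEED_PAIRED_STWCX, the
	 * preceding lwarx keeps the pairing the hardware expects.
	 */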
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	andi.	r4,r8,MSR_PR
	beq	3f
	CURRENT_THREAD_INFO(r4, r1)
	ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
3:
#endif
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtlr	r14
	mr	r3,r15
	PPC440EP_ERR42
	blrl
	li	r3,0
	b	ret_from_syscall
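	/*
	 * Note on ret_from_kernel_thread: r14 and r15 are non-volatile
	 * registers which (as set up by copy_thread) hold the thread
	 * function and its argument, restored here by REST_NVGPRS.  The
	 * function is called via LR with r3 as its argument; if it ever
	 * returns, the zero in r3 goes down the normal syscall-exit path.
	 */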

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * as the call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)

	cmplwi	r0,NR_syscalls
	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	bge-	ret_from_syscall
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000			/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

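	/*
	 * The 3: loop above is the classic lwarx/stwcx. read-modify-write:
	 * in C terms it atomically performs
	 *	current_thread_info()->flags &= ~_TIF_PERSYSCALL_MASK;
	 * retrying if another update to TI_FLAGS slips in between the
	 * load-reserve and the store-conditional.  The dcbt under
	 * CONFIG_IBM405_ERR77 is an errata workaround for 405 cores.
	 */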
4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
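/*
 * The marker used here appears to be the low bit of the _TRAP word in
 * the exception frame: trap numbers are multiples of 0x100, so bit 0 is
 * free, and when set it means "only volatile registers saved".  The
 * rlwinm ...,0,0,30 below clears that bit after SAVE_NVGPRS to record
 * that the frame now holds the full register set - the same convention
 * tested above at "Save NVGPRS if they're not saved already".
 */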
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this code, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
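	/*
	 * Lazy context-switch trick above: rather than saving the FP /
	 * Altivec / SPE register sets here, we just clear the
	 * corresponding MSR bits.  If the incoming task later touches one
	 * of those units it takes an "unavailable" exception, and only
	 * then does the state get saved and restored, keeping the common
	 * switch path cheap.
	 */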
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	CURRENT_THREAD_INFO(r9, r1)
	ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
#endif

	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r8,TI_FLAGS(r9)
	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	addi	r6,r6,4
	bdnz	2b
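	/*
	 * The loop above is a simple counted copy: CTR is loaded with
	 * INT_FRAME_SIZE/4 and each bdnz iteration moves one 32-bit word
	 * from the original frame (r4) to the trampoline frame (r3),
	 * advancing the shared offset in r6 by 4.
	 */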

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	stw	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	lwarx	r8,0,r5
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r5
#endif
	stwcx.	r8,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* check current_thread_info->preempt_count */
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	andi.	r8,r8,_TIF_NEED_RESCHED
	beq+	restore
	lwz	r3,_MSR(r1)
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
1:	bl	preempt_schedule_irq
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such. However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 *
	 * The problem here sadly is that we don't know whether the exception was
	 * one that turned interrupts off or not. So we always tell lockdep about
	 * turning them on here when we go back to wherever we came from with EE
	 * on, even if that may mean some redundant calls being tracked. Maybe later
	 * we could encode what the exception did somewhere or test the exception
	 * type in the pt_regs but that sounds overkill
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	/*
	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
	 * which is the stack frame here, we need to force a stack frame
	 * in case we came from user space.
	 */
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-32(r1)
	bl	trace_hardirqs_on
	lwz	r1,0(r1)
	lwz	r1,0(r1)
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we     \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)		\
	lwz	r9,_##exc_lvl_srr0(r1);				\
	lwz	r10,_##exc_lvl_srr1(r1);			\
	mtspr	SPRN_##exc_lvl_srr0,r9;				\
	mtspr	SPRN_##exc_lvl_srr1,r10;
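/*
 * Token pasting makes the macro generic over exception levels; for
 * example RESTORE_xSRR(CSRR0,CSRR1) expands to
 *	lwz r9,_CSRR0(r1); lwz r10,_CSRR1(r1);
 *	mtspr SPRN_CSRR0,r9; mtspr SPRN_CSRR1,r10;
 * restoring the critical-level save/restore registers from the
 * exception frame.
 */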

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7						\
	lwz	r11,MAS7(r1);					\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS					\
	lwz	r9,MAS0(r1);					\
	lwz	r10,MAS1(r1);					\
	lwz	r11,MAS2(r1);					\
	mtspr	SPRN_MAS0,r9;					\
	lwz	r9,MAS3(r1);					\
	mtspr	SPRN_MAS1,r10;					\
	lwz	r10,MAS6(r1);					\
	mtspr	SPRN_MAS2,r11;					\
	mtspr	SPRN_MAS3,r9;					\
	mtspr	SPRN_MAS6,r10;					\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS					\
	lwz	r9,MMUCR(r1);					\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set on entry to this routine.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	/* Note: We don't need to inform lockdep that we are enabling
	 * interrupts here. As far as it knows, they are already enabled
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: And we don't tell it we are disabling them again
	 * either. Those disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG_RTAS,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG_RTAS,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/*
	 * It is required that _mcount on PPC32 must preserve the
	 * link register.  But we have r0 to play with.  We use r0
	 * to stash the return address of mcount's caller in the ctr
	 * register, restore the link register, and then jump back
	 * using the ctr register.
	 */
	mflr	r0
	mtctr	r0
	lwz	r0, 4(r1)
	mtlr	r0
	bctr

_GLOBAL(ftrace_caller)
	MCOUNT_SAVE_FRAME
	/* r3 ends up with link register */
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)

	MCOUNT_SAVE_FRAME

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5, ftrace_trace_function)
	lwz	r5,0(r5)

	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	MCOUNT_RESTORE_FRAME
	bctr
#endif

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	lwz	r4, 44(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* Grab the LR out of the caller stack frame */
	lwz	r3,52(r1)

	bl	prepare_ftrace_return
	nop

	/*
	 * prepare_ftrace_return gives us the address we divert to.
	 * Change the LR in the caller's stack frame to this.
	 */
	stw	r3,52(r1)

	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr

_GLOBAL(return_to_handler)
	/* need to save return values */
	stwu	r1, -32(r1)
	stw	r3, 20(r1)
	stw	r4, 16(r1)
	stw	r31, 12(r1)
	mr	r31, r1

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	lwz	r3, 20(r1)
	lwz	r4, 16(r1)
	lwz	r31,12(r1)
	lwz	r1, 0(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */