/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

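/*
 * Register state on entry, as set up by the system call exception
 * prologue (a summary inferred from the stores below, not an exhaustive
 * contract): r0 holds the syscall number, r3-r8 the arguments, r9 the
 * user's r13, r11 the saved SRR0 (NIP), r12 the saved SRR1 (MSR), and
 * r13 the PACA.  If we came from userspace (MSR_PR set), the kernel
 * stack pointer is loaded from PACAKSAVE; otherwise we keep using the
 * current r1, minus a new INT_FRAME_SIZE frame.
 */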
	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	/*
	 * This "crclr so" clears CR0.SO, which is the error indication on
	 * return from this system call.  There must be no cmp instruction
	 * between it and the "mfcr r9" below, otherwise if XER.SO is set,
	 * CR0.SO will get set, causing all system calls to appear to fail.
	 */
	crclr	so
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	mfcr	r9
	mflr	r10
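	/*
	 * 0xc01 is the 0xc00 (system call) trap number with the low bit
	 * set, which by the convention in this file means the
	 * non-volatile GPRs have not been saved yet (see save_nvgprs
	 * below, which tests and clears that bit).
	 */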
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	ld	r10,LPPACA_DTLIDX(r10)	/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	.accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r12,_MSR(r1)
#endif /* CONFIG_TRACE_IRQFLAGS */
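	/*
	 * Record the interrupt state as fully enabled in the PACA
	 * (soft and hard), and stash the soft-enable state in the
	 * exception frame so it is restored on return.
	 */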
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	bne	2f
	b	hardware_interrupt_entry
2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	/* Hard enable interrupts */
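	/*
	 * Note: mtmsrd with the L=1 operand updates only MSR[EE] and
	 * MSR[RI], leaving the rest of the MSR alone; wrteei is the
	 * Book3E way to write just the EE bit.
	 */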
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
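/*
 * Table layout, as the scaling below implies: each syscall gets a
 * 16-byte slot, with the 64-bit handler at offset 0 and the 32-bit
 * entry at offset 8 (hence the "addi r11,r11,8" and "slwi r0,r0,4").
 */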
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif

	/* Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
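	/*
	 * Clear MSR[EE] in place: rldicl rotates the MSR right by 16
	 * (rotate left 48), bringing the EE bit to the top where the
	 * mask clears it, and the rotldi rotates everything back.
	 */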
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
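	/*
	 * Return values in [-_LAST_ERRNO, -1] signal errors; as an
	 * unsigned comparison that is simply r3 >= -_LAST_ERRNO.
	 * Roughly, in C (a sketch of what syscall_error below does):
	 *	if ((unsigned long)r3 >= (unsigned long)-_LAST_ERRNO) {
	 *		r3 = -r3;		// positive errno
	 *		ccr |= 0x10000000;	// set CR0.SO
	 *	}
	 */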
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
#endif /* CONFIG_PPC_BOOK3S */

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -_LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

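	/*
	 * This is an atomic read-modify-write of TI_FLAGS: if the
	 * stdcx. fails because the flags changed under us, the ldarx
	 * is retried.
	 */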
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr


/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	MTMSRD(r22)
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
  FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f
ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */

	/* No need to check for CPU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S */

	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)
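	/*
	 * PACAKSAVE now holds the top of the new task's kernel stack
	 * (less one frame); this is the value system_call_common picks
	 * up when the task next enters the kernel from userspace.
	 */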

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
BEGIN_FW_FTR_SECTION
	ld	r5,SOFTE(r1)
FW_FTR_SECTION_ELSE
	b	.Liseries_check_pending_irqs
ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
2:
	TRACE_AND_RESTORE_IRQ(r5);

	/* extract EE bit and use it to restore paca->hard_enabled */
	ld	r3,_MSR(r1)
	rldicl	r4,r3,49,63		/* r4 = (r3 >> 15) & 1 */
	stb	r4,PACAHARDIRQEN(r13)

#ifdef CONFIG_PPC_BOOK3E
	b	.exception_return_book3e
#else
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	mfmsr	r4
	andc	r4,r4,r0	/* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

.Liseries_check_pending_irqs:
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	2b
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	2b			/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_off
	mfmsr	r10
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10			/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite		/* loop back and handle more */
#endif

do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore

	/* Here we are preempting the current task.
	 *
	 * Ensure interrupts are soft-disabled. We also properly mark
	 * the PACA to reflect the fact that they are hard-disabled
	 * and trace the change
	 */
	li	r0,0
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)
	TRACE_DISABLE_INTS

	/* Call the scheduler with soft IRQs off */
1:	bl	.preempt_schedule_irq

	/* Hard-disable interrupts again (and update PACA) */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */
	li	r0,0
	stb	r0,PACAHARDIRQEN(r13)

	/* Re-test flags and eventually loop */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif /* CONFIG_PREEMPT */

	/* Enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
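	/*
	 * Clearing the top two bits strips the 0xc000000000000000
	 * kernel linear-mapping offset, yielding the real (physical)
	 * address of the return location.
	 */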
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG_PACA	/* Get PACA */
	clrldi	r4,r4,2			/* convert to realmode address */

	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &.rtas_restore_regs */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	.rtas_restore_regs

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG_PACA

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	mtlr	r4

	/* Switch MSR to 32 bits mode
	 */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtmsr	r11
#else /* CONFIG_PPC_BOOK3E */
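	/*
	 * On Book3S this clears MSR_SF (the 64-bit mode bit) and
	 * MSR_ISF (64-bit interrupt mode) so that Open Firmware runs
	 * with 32-bit addressing.
	 */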
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
#endif /* CONFIG_PPC_BOOK3E */
	isync

	/* Enter PROM here... */
	blrl

	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	blr

_GLOBAL(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
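	/*
	 * On entry LR holds the address just after the mcount call
	 * site.  Below, r3 becomes the traced function's ip (LR minus
	 * MCOUNT_INSN_SIZE) and r4 the parent's return address, read
	 * from the caller's LR save slot at 16(r11) (r11 = back chain).
	 */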
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else
_GLOBAL(mcount)
	blr

_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)
	ld	r5,0(r5)
	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	ld	r4, 128(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	ld	r11, 112(r1)
	addi	r3, r11, 16

	bl	.prepare_ftrace_return
	nop

	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr

_GLOBAL(return_to_handler)
	/* need to save return values */
	std	r4,  -24(r1)
	std	r3,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -24(r1)
	ld	r3,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr

_GLOBAL(mod_return_to_handler)
	/* need to save return values */
	std	r4,  -32(r1)
	std	r3,  -24(r1)
	/* save TOC */
	std	r2,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We are in a module using the module's TOC.
	 * Switch to our TOC to run inside the core kernel.
	 */
	ld	r2, PACATOC(r13)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -32(r1)
	ld	r3,  -24(r1)
	ld	r2,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */