arch/powerpc/kernel/entry_64.S
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align	7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
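	/*
	 * If we came from userspace (MSR_PR was set in r12), switch to
	 * the kernel stack saved in the PACA; r13 holds the per-CPU
	 * PACA pointer throughout the kernel.
	 */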
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	crclr	so
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	mfcr	r9
	mflr	r10
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r12,_MSR(r1)
#endif /* CONFIG_TRACE_IRQFLAGS */
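	/*
	 * PACASOFTIRQEN/PACAHARDIRQEN track the soft- and hard-enable
	 * interrupt state for lazy interrupt masking.  Syscall entry
	 * runs with interrupts enabled, so mark both enabled here,
	 * before MSR_EE is actually turned back on below.
	 */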
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	bne	2f
	b	hardware_interrupt_entry
2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
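	/*
	 * Each slot in the table is 16 bytes: the 64-bit native entry
	 * at offset 0 and the 32-bit entry at offset 8 (selected by
	 * the addi above), hence the shift left by 4 to index it.
	 */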
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
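	/*
	 * The rldicl/rotldi pair above rotates MSR_EE (bit 48) into
	 * bit 0, masks it off, then rotates the value back into place:
	 * the result is the old MSR with only EE cleared, which the
	 * mtmsrd installs to disable external interrupts.
	 */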
	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
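	/*
	 * The store-conditional to the stack kills any larx
	 * reservation the kernel may still hold, so a later stcx. in
	 * the context we return to cannot falsely succeed against a
	 * stale reservation.
	 */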
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	rfid
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS
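	/*
	 * The ldarx/stdcx. loop above clears the per-syscall flags
	 * atomically: if another thread updates TI_FLAGS between the
	 * load-reserve and the store-conditional, the stdcx. fails
	 * and we retry, so no concurrent flag update is lost.
	 */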

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f
ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */

	/* No need to check for CPU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync

2:
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
BEGIN_FW_FTR_SECTION
	ld	r5,SOFTE(r1)
FW_FTR_SECTION_ELSE
	b	iseries_check_pending_irqs
ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
2:
	TRACE_AND_RESTORE_IRQ(r5);

	/* extract EE bit and use it to restore paca->hard_enabled */
	ld	r3,_MSR(r1)
	rldicl	r4,r3,49,63		/* r4 = (r3 >> 15) & 1 */
	stb	r4,PACAHARDIRQEN(r13)

	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	stdcx.	r0,0,r1		/* to clear the reservation */

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	mfmsr	r4
	andc	r4,r4,r0	/* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

iseries_check_pending_irqs:
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	2b
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	2b			/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_off
	mfmsr	r10
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10			/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite		/* loop back and handle more */
#endif

do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore
	/* here we are preempting the current task */
1:
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	/* Note: we just clobbered r10 which used to contain the previous
	 * MSR before the hard-disabling done by the caller of do_work.
	 * We don't have that value anymore, but it doesn't matter as
	 * we will hard-enable unconditionally, we can just reload the
	 * current MSR into r10
	 */
	mfmsr	r10
#endif /* CONFIG_TRACE_IRQFLAGS */
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
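	/*
	 * r0 is the caller's MSR with EE/SE/BE/RI cleared; r6 further
	 * drops SF (64-bit mode), IR/DR (translation) and the FP bits,
	 * then sets RI again, yielding the 32-bit real-mode MSR that
	 * RTAS is entered with.
	 */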
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3		/* Get PACA */
	clrldi	r4,r4,2			/* convert to realmode address */

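	/*
	 * bcl 20,31,$+4 branches to the next instruction while
	 * loading LR with its address, without being treated as a
	 * real subroutine call by the branch predictor's link stack;
	 * this lets the following code locate rtas_restore_regs
	 * position-independently, since relocation is off here.
	 */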
	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &.rtas_restore_regs */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	.rtas_restore_regs

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

	/* Switch the MSR to 32-bit mode */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#ifdef CONFIG_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE
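/*
 * When the compiler-inserted call to _mcount is reached, LR holds
 * the address of the instruction after the call site in the traced
 * function; subtracting MCOUNT_INSN_SIZE recovers the call site
 * itself for r3.  Where a parent address is needed (r4), it is
 * fetched from the LR save slot (16(r11)) of the caller's frame.
 */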
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	subi	r3, r3, MCOUNT_INSN_SIZE
	.globl mcount_call
mcount_call:
	bl	ftrace_stub
	nop
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr

_GLOBAL(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else
_GLOBAL(mcount)
	blr

_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)
	ld	r5,0(r5)
	mtctr	r5
	bctrl

	nop
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif
#endif