1 /*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
20
21 #include <linux/errno.h>
22 #include <asm/unistd.h>
23 #include <asm/processor.h>
24 #include <asm/page.h>
25 #include <asm/mmu.h>
26 #include <asm/thread_info.h>
27 #include <asm/ppc_asm.h>
28 #include <asm/asm-offsets.h>
29 #include <asm/cputable.h>
30 #include <asm/firmware.h>
31 #include <asm/bug.h>
32 #include <asm/ptrace.h>
33 #include <asm/irqflags.h>
34 #include <asm/ftrace.h>
35
36 /*
37 * System calls.
38 */
39 .section ".toc","aw"
40 .SYS_CALL_TABLE:
41 .tc .sys_call_table[TC],.sys_call_table
42
43 /* This value is used to mark exception frames on the stack. */
44 exception_marker:
45 .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
46
47 .section ".text"
48 .align 7
49
50 #undef SHOW_SYSCALLS
51
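/*
 * On entry to system_call_common (set up by the system call exception
 * prolog):
 *   r0      syscall number
 *   r3-r8   syscall arguments
 *   r9      caller's r13 (saved to GPR13 below)
 *   r11     caller's NIP (stored to _NIP below)
 *   r12     caller's MSR (stored to _MSR below)
 *   r13     PACA pointer
 *   r1      still the caller's stack pointer until swapped below
 */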
52 .globl system_call_common
53 system_call_common:
54 andi. r10,r12,MSR_PR
55 mr r10,r1
56 addi r1,r1,-INT_FRAME_SIZE
57 beq- 1f
58 ld r1,PACAKSAVE(r13)
59 1: std r10,0(r1)
60 std r11,_NIP(r1)
61 std r12,_MSR(r1)
62 std r0,GPR0(r1)
63 std r10,GPR1(r1)
64 ACCOUNT_CPU_USER_ENTRY(r10, r11)
65 /*
66 * This "crclr so" clears CR0.SO, which is the error indication on
67 * return from this system call. There must be no cmp instruction
68 * between it and the "mfcr r9" below, otherwise if XER.SO is set,
69 * CR0.SO will get set, causing all system calls to appear to fail.
70 */
71 crclr so
72 std r2,GPR2(r1)
73 std r3,GPR3(r1)
74 std r4,GPR4(r1)
75 std r5,GPR5(r1)
76 std r6,GPR6(r1)
77 std r7,GPR7(r1)
78 std r8,GPR8(r1)
79 li r11,0
80 std r11,GPR9(r1)
81 std r11,GPR10(r1)
82 std r11,GPR11(r1)
83 std r11,GPR12(r1)
84 std r9,GPR13(r1)
85 mfcr r9
86 mflr r10
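/* 0xc01 = system call trap (0xc00) with bit 0 set, meaning the
 * non-volatile GPRs have not been saved (see save_nvgprs) */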
87 li r11,0xc01
88 std r9,_CCR(r1)
89 std r10,_LINK(r1)
90 std r11,_TRAP(r1)
91 mfxer r9
92 mfctr r10
93 std r9,_XER(r1)
94 std r10,_CTR(r1)
95 std r3,ORIG_GPR3(r1)
96 ld r2,PACATOC(r13)
97 addi r9,r1,STACK_FRAME_OVERHEAD
98 ld r11,exception_marker@toc(r2)
99 std r11,-16(r9) /* "regshere" marker */
100 #if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)
101 BEGIN_FW_FTR_SECTION
102 beq 33f
103 /* if from user, see if there are any DTL entries to process */
104 ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */
105 ld r11,PACA_DTL_RIDX(r13) /* get log read index */
106 ld r10,LPPACA_DTLIDX(r10) /* get log write index */
107 cmpd cr1,r11,r10
108 beq+ cr1,33f
109 bl .accumulate_stolen_time
110 REST_GPR(0,r1)
111 REST_4GPRS(3,r1)
112 REST_2GPRS(7,r1)
113 addi r9,r1,STACK_FRAME_OVERHEAD
114 33:
115 END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
116 #endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */
117
118 #ifdef CONFIG_TRACE_IRQFLAGS
119 bl .trace_hardirqs_on
120 REST_GPR(0,r1)
121 REST_4GPRS(3,r1)
122 REST_2GPRS(7,r1)
123 addi r9,r1,STACK_FRAME_OVERHEAD
124 ld r12,_MSR(r1)
125 #endif /* CONFIG_TRACE_IRQFLAGS */
126 li r10,1
127 stb r10,PACASOFTIRQEN(r13)
128 stb r10,PACAHARDIRQEN(r13)
129 std r10,SOFTE(r1)
130 #ifdef CONFIG_PPC_ISERIES
131 BEGIN_FW_FTR_SECTION
132 /* Hack for handling interrupts when soft-enabling on iSeries */
133 cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
134 andi. r10,r12,MSR_PR /* from kernel */
135 crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
136 bne 2f
137 b hardware_interrupt_entry
138 2:
139 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
140 #endif /* CONFIG_PPC_ISERIES */
141
142 /* Hard enable interrupts */
143 #ifdef CONFIG_PPC_BOOK3E
144 wrteei 1
145 #else
146 mfmsr r11
147 ori r11,r11,MSR_EE
148 mtmsrd r11,1
149 #endif /* CONFIG_PPC_BOOK3E */
150
151 #ifdef SHOW_SYSCALLS
152 bl .do_show_syscall
153 REST_GPR(0,r1)
154 REST_4GPRS(3,r1)
155 REST_2GPRS(7,r1)
156 addi r9,r1,STACK_FRAME_OVERHEAD
157 #endif
158 clrrdi r11,r1,THREAD_SHIFT
159 ld r10,TI_FLAGS(r11)
160 andi. r11,r10,_TIF_SYSCALL_T_OR_A
161 bne- syscall_dotrace
162 syscall_dotrace_cont:
163 cmpldi 0,r0,NR_syscalls
164 bge- syscall_enosys
165
166 system_call: /* label this so stack traces look sane */
167 /*
168 * Need to vector to 32 Bit or default sys_call_table here,
169 * based on caller's run-mode / personality.
170 */
171 ld r11,.SYS_CALL_TABLE@toc(2)
172 andi. r10,r10,_TIF_32BIT
173 beq 15f
174 addi r11,r11,8 /* use 32-bit syscall entries */
175 clrldi r3,r3,32
176 clrldi r4,r4,32
177 clrldi r5,r5,32
178 clrldi r6,r6,32
179 clrldi r7,r7,32
180 clrldi r8,r8,32
181 15:
182 slwi r0,r0,4
183 ldx r10,r11,r0 /* Fetch system call handler [ptr] */
184 mtctr r10
185 bctrl /* Call handler */
186
187 syscall_exit:
188 std r3,RESULT(r1)
189 #ifdef SHOW_SYSCALLS
190 bl .do_show_syscall_exit
191 ld r3,RESULT(r1)
192 #endif
193 clrrdi r12,r1,THREAD_SHIFT
194
195 ld r8,_MSR(r1)
196 #ifdef CONFIG_PPC_BOOK3S
197 /* No MSR:RI on BookE */
198 andi. r10,r8,MSR_RI
199 beq- unrecov_restore
200 #endif
201
202 /* Disable interrupts so current_thread_info()->flags can't change,
203 * and so that we don't get interrupted after loading SRR0/1.
204 */
205 #ifdef CONFIG_PPC_BOOK3E
206 wrteei 0
207 #else
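/* Clear MSR_EE without needing a mask register: rotate EE down to
 * bit 0, mask it off, then rotate the remaining 16 bits back
 * (48 + 16 = 64, so everything else is unchanged) */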
208 mfmsr r10
209 rldicl r10,r10,48,1
210 rotldi r10,r10,16
211 mtmsrd r10,1
212 #endif /* CONFIG_PPC_BOOK3E */
213
214 ld r9,TI_FLAGS(r12)
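/* Return values in [-_LAST_ERRNO, -1], i.e. unsigned values >=
 * -_LAST_ERRNO, indicate an error (hence the unsigned cmpld below) */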
215 li r11,-_LAST_ERRNO
216 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
217 bne- syscall_exit_work
218 cmpld r3,r11
219 ld r5,_CCR(r1)
220 bge- syscall_error
221 syscall_error_cont:
222 ld r7,_NIP(r1)
223 BEGIN_FTR_SECTION
224 stdcx. r0,0,r1 /* to clear the reservation */
225 END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
226 andi. r6,r8,MSR_PR
227 ld r4,_LINK(r1)
228 /*
229 * Clear RI before restoring r13. If we are returning to
230 * userspace and we take an exception after restoring r13,
231 * we end up corrupting the userspace r13 value.
232 */
233 #ifdef CONFIG_PPC_BOOK3S
234 /* No MSR:RI on BookE */
235 li r12,MSR_RI
236 andc r11,r10,r12
237 mtmsrd r11,1 /* clear MSR.RI */
238 #endif /* CONFIG_PPC_BOOK3S */
239
240 beq- 1f
241 ACCOUNT_CPU_USER_EXIT(r11, r12)
242 ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
243 1: ld r2,GPR2(r1)
244 ld r1,GPR1(r1)
245 mtlr r4
246 mtcr r5
247 mtspr SPRN_SRR0,r7
248 mtspr SPRN_SRR1,r8
249 RFI
250 b . /* prevent speculative execution */
251
252 syscall_error:
253 oris r5,r5,0x1000 /* Set SO bit in CR */
254 neg r3,r3
255 std r5,_CCR(r1)
256 b syscall_error_cont
257
258 /* Traced system call support */
259 syscall_dotrace:
260 bl .save_nvgprs
261 addi r3,r1,STACK_FRAME_OVERHEAD
262 bl .do_syscall_trace_enter
263 /*
264 * Restore argument registers possibly just changed.
265 * We use the return value of do_syscall_trace_enter
266 * for the call number to look up in the table (r0).
267 */
268 mr r0,r3
269 ld r3,GPR3(r1)
270 ld r4,GPR4(r1)
271 ld r5,GPR5(r1)
272 ld r6,GPR6(r1)
273 ld r7,GPR7(r1)
274 ld r8,GPR8(r1)
275 addi r9,r1,STACK_FRAME_OVERHEAD
276 clrrdi r10,r1,THREAD_SHIFT
277 ld r10,TI_FLAGS(r10)
278 b syscall_dotrace_cont
279
280 syscall_enosys:
281 li r3,-ENOSYS
282 b syscall_exit
283
284 syscall_exit_work:
285 /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
286 If TIF_NOERROR is set, just save r3 as it is. */
287
288 andi. r0,r9,_TIF_RESTOREALL
289 beq+ 0f
290 REST_NVGPRS(r1)
291 b 2f
292 0: cmpld r3,r11 /* r11 is -_LAST_ERRNO */
293 blt+ 1f
294 andi. r0,r9,_TIF_NOERROR
295 bne- 1f
296 ld r5,_CCR(r1)
297 neg r3,r3
298 oris r5,r5,0x1000 /* Set SO bit in CR */
299 std r5,_CCR(r1)
300 1: std r3,GPR3(r1)
301 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
302 beq 4f
303
304 /* Clear per-syscall TIF flags if any are set. */
305
306 li r11,_TIF_PERSYSCALL_MASK
307 addi r12,r12,TI_FLAGS
308 3: ldarx r10,0,r12
309 andc r10,r10,r11
310 stdcx. r10,0,r12
311 bne- 3b
312 subi r12,r12,TI_FLAGS
313
314 4: /* Anything else left to do? */
315 andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
316 beq .ret_from_except_lite
317
318 /* Re-enable interrupts */
319 #ifdef CONFIG_PPC_BOOK3E
320 wrteei 1
321 #else
322 mfmsr r10
323 ori r10,r10,MSR_EE
324 mtmsrd r10,1
325 #endif /* CONFIG_PPC_BOOK3E */
326
327 bl .save_nvgprs
328 addi r3,r1,STACK_FRAME_OVERHEAD
329 bl .do_syscall_trace_leave
330 b .ret_from_except
331
332 /* Save non-volatile GPRs, if not already saved. */
333 _GLOBAL(save_nvgprs)
334 ld r11,_TRAP(r1)
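/* bit 0 of _TRAP set means the non-volatile GPRs are not saved yet */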
335 andi. r0,r11,1
336 beqlr-
337 SAVE_NVGPRS(r1)
338 clrrdi r0,r11,1
339 std r0,_TRAP(r1)
340 blr
341
342
343 /*
344 * The sigsuspend and rt_sigsuspend system calls can call do_signal
345 * and thus put the process into the stopped state where we might
346 * want to examine its user state with ptrace. Therefore we need
347 * to save all the nonvolatile registers (r14 - r31) before calling
348 * the C code. Similarly, fork, vfork and clone need the full
349 * register state on the stack so that it can be copied to the child.
350 */
351
352 _GLOBAL(ppc_fork)
353 bl .save_nvgprs
354 bl .sys_fork
355 b syscall_exit
356
357 _GLOBAL(ppc_vfork)
358 bl .save_nvgprs
359 bl .sys_vfork
360 b syscall_exit
361
362 _GLOBAL(ppc_clone)
363 bl .save_nvgprs
364 bl .sys_clone
365 b syscall_exit
366
367 _GLOBAL(ppc32_swapcontext)
368 bl .save_nvgprs
369 bl .compat_sys_swapcontext
370 b syscall_exit
371
372 _GLOBAL(ppc64_swapcontext)
373 bl .save_nvgprs
374 bl .sys_swapcontext
375 b syscall_exit
376
377 _GLOBAL(ret_from_fork)
378 bl .schedule_tail
379 REST_NVGPRS(r1)
380 li r3,0
381 b syscall_exit
382
383 /*
384 * This routine switches between two different tasks. The process
385 * state of one is saved on its kernel stack. Then the state
386 * of the other is restored from its kernel stack. The memory
387 * management hardware is updated to the second process's state.
388 * Finally, we can return to the second process, via ret_from_except.
389 * On entry, r3 points to the THREAD for the current task, r4
390 * points to the THREAD for the new task.
391 *
392 * Note: there are two ways to get to the "going out" portion
393 * of this code; either by coming in via the entry (_switch)
394 * or via "fork" which must set up an environment equivalent
395 * to the "_switch" path. If you change this you'll have to change
396 * the fork code also.
397 *
398 * The code which creates the new task context is in 'copy_thread'
399 * in arch/powerpc/kernel/process.c
400 */
401 .align 7
402 _GLOBAL(_switch)
403 mflr r0
404 std r0,16(r1)
405 stdu r1,-SWITCH_FRAME_SIZE(r1)
406 /* r3-r13 are caller saved -- Cort */
407 SAVE_8GPRS(14, r1)
408 SAVE_10GPRS(22, r1)
409 mflr r20 /* Return to switch caller */
410 mfmsr r22
411 li r0, MSR_FP
412 #ifdef CONFIG_VSX
413 BEGIN_FTR_SECTION
414 oris r0,r0,MSR_VSX@h /* Disable VSX */
415 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
416 #endif /* CONFIG_VSX */
417 #ifdef CONFIG_ALTIVEC
418 BEGIN_FTR_SECTION
419 oris r0,r0,MSR_VEC@h /* Disable altivec */
420 mfspr r24,SPRN_VRSAVE /* save vrsave register value */
421 std r24,THREAD_VRSAVE(r3)
422 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
423 #endif /* CONFIG_ALTIVEC */
424 and. r0,r0,r22
425 beq+ 1f
426 andc r22,r22,r0
427 MTMSRD(r22)
428 isync
429 1: std r20,_NIP(r1)
430 mfcr r23
431 std r23,_CCR(r1)
432 std r1,KSP(r3) /* Set old stack pointer */
433
434 #ifdef CONFIG_SMP
435 /* We need a sync somewhere here to make sure that if the
436 * previous task gets rescheduled on another CPU, it sees all
437 * stores it has performed on this one.
438 */
439 sync
440 #endif /* CONFIG_SMP */
441
442 /*
443 * If we optimise away the clear of the reservation in system
444 * calls because we know the CPU tracks the address of the
445 * reservation, then we need to clear it here to cover the
446 * case that the kernel context switch path has no larx
447 * instructions.
448 */
449 BEGIN_FTR_SECTION
450 ldarx r6,0,r1
451 END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
452
453 addi r6,r4,-THREAD /* Convert THREAD to 'current' */
454 std r6,PACACURRENT(r13) /* Set new 'current' */
455
456 ld r8,KSP(r4) /* new stack pointer */
457 #ifdef CONFIG_PPC_BOOK3S
458 BEGIN_FTR_SECTION
459 BEGIN_FTR_SECTION_NESTED(95)
460 clrrdi r6,r8,28 /* get its ESID */
461 clrrdi r9,r1,28 /* get current sp ESID */
462 FTR_SECTION_ELSE_NESTED(95)
463 clrrdi r6,r8,40 /* get its 1T ESID */
464 clrrdi r9,r1,40 /* get current sp 1T ESID */
465 ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95)
466 FTR_SECTION_ELSE
467 b 2f
468 ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB)
469 clrldi. r0,r6,2 /* is new ESID c00000000? */
470 cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
471 cror eq,4*cr1+eq,eq
472 beq 2f /* if yes, don't slbie it */
473
474 /* Bolt in the new stack SLB entry */
475 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
476 oris r0,r6,(SLB_ESID_V)@h
477 ori r0,r0,(SLB_NUM_BOLTED-1)@l
478 BEGIN_FTR_SECTION
479 li r9,MMU_SEGSIZE_1T /* insert B field */
480 oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
481 rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0
482 END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
483
484 /* Update the last bolted SLB. No write barriers are needed
485 * here, provided we only update the current CPU's SLB shadow
486 * buffer.
487 */
488 ld r9,PACA_SLBSHADOWPTR(r13)
489 li r12,0
490 std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
491 std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */
492 std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */
493
494 /* No need to check for CPU_FTR_NO_SLBIE_B here, since when
495 * we have 1TB segments, the only CPUs known to have the errata
496 * only support less than 1TB of system memory and we'll never
497 * actually hit this code path.
498 */
499
500 slbie r6
501 slbie r6 /* Workaround POWER5 < DD2.1 issue */
502 slbmte r7,r0
503 isync
504 2:
505 #endif /* CONFIG_PPC_BOOK3S */
506
507 clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
508 /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
509 because we don't need to leave the 288-byte ABI gap at the
510 top of the kernel stack. */
511 addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
512
513 mr r1,r8 /* start using new stack pointer */
514 std r7,PACAKSAVE(r13)
515
516 ld r6,_CCR(r1)
517 mtcrf 0xFF,r6
518
519 #ifdef CONFIG_ALTIVEC
520 BEGIN_FTR_SECTION
521 ld r0,THREAD_VRSAVE(r4)
522 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
523 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
524 #endif /* CONFIG_ALTIVEC */
525
526 /* r3-r13 are destroyed -- Cort */
527 REST_8GPRS(14, r1)
528 REST_10GPRS(22, r1)
529
530 /* convert old thread to its task_struct for return value */
531 addi r3,r3,-THREAD
532 ld r7,_NIP(r1) /* Return to _switch caller in new task */
533 mtlr r7
534 addi r1,r1,SWITCH_FRAME_SIZE
535 blr
536
537 .align 7
538 _GLOBAL(ret_from_except)
539 ld r11,_TRAP(r1)
540 andi. r0,r11,1
541 bne .ret_from_except_lite
542 REST_NVGPRS(r1)
543
544 _GLOBAL(ret_from_except_lite)
545 /*
546 * Disable interrupts so that current_thread_info()->flags
547 * can't change between when we test it and when we return
548 * from the interrupt.
549 */
550 #ifdef CONFIG_PPC_BOOK3E
551 wrteei 0
552 #else
553 mfmsr r10 /* Get current interrupt state */
554 rldicl r9,r10,48,1 /* clear MSR_EE */
555 rotldi r9,r9,16
556 mtmsrd r9,1 /* Update machine state */
557 #endif /* CONFIG_PPC_BOOK3E */
558
559 #ifdef CONFIG_PREEMPT
560 clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
561 li r0,_TIF_NEED_RESCHED /* bits to check */
562 ld r3,_MSR(r1)
563 ld r4,TI_FLAGS(r9)
564 /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
565 rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
566 and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */
567 bne do_work
568
569 #else /* !CONFIG_PREEMPT */
570 ld r3,_MSR(r1) /* Returning to user mode? */
571 andi. r3,r3,MSR_PR
572 beq restore /* if not, just restore regs and return */
573
574 /* Check current_thread_info()->flags */
575 clrrdi r9,r1,THREAD_SHIFT
576 ld r4,TI_FLAGS(r9)
577 andi. r0,r4,_TIF_USER_WORK_MASK
578 bne do_work
579 #endif
580
581 restore:
582 BEGIN_FW_FTR_SECTION
583 ld r5,SOFTE(r1)
584 FW_FTR_SECTION_ELSE
585 b .Liseries_check_pending_irqs
586 ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
587 2:
588 TRACE_AND_RESTORE_IRQ(r5);
589
590 /* extract EE bit and use it to restore paca->hard_enabled */
591 ld r3,_MSR(r1)
592 rldicl r4,r3,49,63 /* r4 = (r3 >> 15) & 1 */
593 stb r4,PACAHARDIRQEN(r13)
594
595 #ifdef CONFIG_PPC_BOOK3E
596 b .exception_return_book3e
597 #else
598 ld r4,_CTR(r1)
599 ld r0,_LINK(r1)
600 mtctr r4
601 mtlr r0
602 ld r4,_XER(r1)
603 mtspr SPRN_XER,r4
604
605 REST_8GPRS(5, r1)
606
607 andi. r0,r3,MSR_RI
608 beq- unrecov_restore
609
610 /*
611 * Clear the reservation. If we know the CPU tracks the address of
612 * the reservation then we can potentially save some cycles and use
613 * a larx. On POWER6 and POWER7 this is significantly faster.
614 */
615 BEGIN_FTR_SECTION
616 stdcx. r0,0,r1 /* to clear the reservation */
617 FTR_SECTION_ELSE
618 ldarx r4,0,r1
619 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
620
621 /*
622 * Clear RI before restoring r13. If we are returning to
623 * userspace and we take an exception after restoring r13,
624 * we end up corrupting the userspace r13 value.
625 */
626 mfmsr r4
627 andc r4,r4,r0 /* r0 contains MSR_RI here */
628 mtmsrd r4,1
629
630 /*
631 * r13 is our per cpu area, only restore it if we are returning to
632 * userspace
633 */
634 andi. r0,r3,MSR_PR
635 beq 1f
636 ACCOUNT_CPU_USER_EXIT(r2, r4)
637 REST_GPR(13, r1)
638 1:
639 mtspr SPRN_SRR1,r3
640
641 ld r2,_CCR(r1)
642 mtcrf 0xFF,r2
643 ld r2,_NIP(r1)
644 mtspr SPRN_SRR0,r2
645
646 ld r0,GPR0(r1)
647 ld r2,GPR2(r1)
648 ld r3,GPR3(r1)
649 ld r4,GPR4(r1)
650 ld r1,GPR1(r1)
651
652 rfid
653 b . /* prevent speculative execution */
654
655 #endif /* CONFIG_PPC_BOOK3E */
656
657 .Liseries_check_pending_irqs:
658 #ifdef CONFIG_PPC_ISERIES
659 ld r5,SOFTE(r1)
660 cmpdi 0,r5,0
661 beq 2b
662 /* Check for pending interrupts (iSeries) */
663 ld r3,PACALPPACAPTR(r13)
664 ld r3,LPPACAANYINT(r3)
665 cmpdi r3,0
666 beq+ 2b /* skip do_IRQ if no interrupts */
667
668 li r3,0
669 stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */
670 #ifdef CONFIG_TRACE_IRQFLAGS
671 bl .trace_hardirqs_off
672 mfmsr r10
673 #endif
674 ori r10,r10,MSR_EE
675 mtmsrd r10 /* hard-enable again */
676 addi r3,r1,STACK_FRAME_OVERHEAD
677 bl .do_IRQ
678 b .ret_from_except_lite /* loop back and handle more */
679 #endif
680
681 do_work:
682 #ifdef CONFIG_PREEMPT
683 andi. r0,r3,MSR_PR /* Returning to user mode? */
684 bne user_work
685 /* Check that preempt_count() == 0 and interrupts are enabled */
686 lwz r8,TI_PREEMPT(r9)
687 cmpwi cr1,r8,0
688 ld r0,SOFTE(r1)
689 cmpdi r0,0
690 crandc eq,cr1*4+eq,eq
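/* eq is now set only if preempt_count() == 0 and soft IRQs are enabled */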
691 bne restore
692
693 /* Here we are preempting the current task.
694 *
695 * Ensure interrupts are soft-disabled. We also properly mark
696 * the PACA to reflect the fact that they are hard-disabled
697 * and trace the change
698 */
699 li r0,0
700 stb r0,PACASOFTIRQEN(r13)
701 stb r0,PACAHARDIRQEN(r13)
702 TRACE_DISABLE_INTS
703
704 /* Call the scheduler with soft IRQs off */
705 1: bl .preempt_schedule_irq
706
707 /* Hard-disable interrupts again (and update PACA) */
708 #ifdef CONFIG_PPC_BOOK3E
709 wrteei 0
710 #else
711 mfmsr r10
712 rldicl r10,r10,48,1
713 rotldi r10,r10,16
714 mtmsrd r10,1
715 #endif /* CONFIG_PPC_BOOK3E */
716 li r0,0
717 stb r0,PACAHARDIRQEN(r13)
718
719 /* Re-test flags and loop again if necessary */
720 clrrdi r9,r1,THREAD_SHIFT
721 ld r4,TI_FLAGS(r9)
722 andi. r0,r4,_TIF_NEED_RESCHED
723 bne 1b
724 b restore
725
726 user_work:
727 #endif /* CONFIG_PREEMPT */
728
729 /* Enable interrupts */
730 #ifdef CONFIG_PPC_BOOK3E
731 wrteei 1
732 #else
733 ori r10,r10,MSR_EE
734 mtmsrd r10,1
735 #endif /* CONFIG_PPC_BOOK3E */
736
737 andi. r0,r4,_TIF_NEED_RESCHED
738 beq 1f
739 bl .schedule
740 b .ret_from_except_lite
741
742 1: bl .save_nvgprs
743 addi r3,r1,STACK_FRAME_OVERHEAD
744 bl .do_signal
745 b .ret_from_except
746
747 unrecov_restore:
748 addi r3,r1,STACK_FRAME_OVERHEAD
749 bl .unrecoverable_exception
750 b unrecov_restore
751
752 #ifdef CONFIG_PPC_RTAS
753 /*
754 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
755 * called with the MMU off.
756 *
757 * In addition, we need to be in 32b mode, at least for now.
758 *
759 * Note: r3 is an input parameter to rtas, so don't trash it...
760 */
761 _GLOBAL(enter_rtas)
762 mflr r0
763 std r0,16(r1)
764 stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
765
766 /* Because RTAS is running in 32b mode, it clobbers the high order half
767 * of all registers that it saves. We therefore save those registers
768 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
769 */
770 SAVE_GPR(2, r1) /* Save the TOC */
771 SAVE_GPR(13, r1) /* Save paca */
772 SAVE_8GPRS(14, r1) /* Save the non-volatiles */
773 SAVE_10GPRS(22, r1) /* ditto */
774
775 mfcr r4
776 std r4,_CCR(r1)
777 mfctr r5
778 std r5,_CTR(r1)
779 mfspr r6,SPRN_XER
780 std r6,_XER(r1)
781 mfdar r7
782 std r7,_DAR(r1)
783 mfdsisr r8
784 std r8,_DSISR(r1)
785
786 /* Temporary workaround to clear CR until RTAS can be modified to
787 * ignore all bits.
788 */
789 li r0,0
790 mtcr r0
791
792 #ifdef CONFIG_BUG
793 /* There is no way it is acceptable to get here with interrupts enabled,
794 * check it with the asm equivalent of WARN_ON
795 */
796 lbz r0,PACASOFTIRQEN(r13)
797 1: tdnei r0,0
798 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
799 #endif
800
801 /* Hard-disable interrupts */
802 mfmsr r6
803 rldicl r7,r6,48,1
804 rotldi r7,r7,16
805 mtmsrd r7,1
806
807 /* Unfortunately, the stack pointer and the MSR are also clobbered,
808 * so they are saved in the PACA which allows us to restore
809 * our original state after RTAS returns.
810 */
811 std r1,PACAR1(r13)
812 std r6,PACASAVEDMSR(r13)
813
814 /* Setup our real return addr */
815 LOAD_REG_ADDR(r4,.rtas_return_loc)
816 clrldi r4,r4,2 /* convert to realmode address */
817 mtlr r4
818
819 li r0,0
820 ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
821 andc r0,r6,r0
822
823 li r9,1
824 rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
825 ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
826 andc r6,r0,r9
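/* r0 = current MSR with EE/SE/BE/RI cleared (used to hard-disable below);
 * r6 = r0 with SF/IR/DR/FP/FE0/FE1/RI also cleared: the 32-bit real-mode
 * MSR that RTAS will run with */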
827 sync /* disable interrupts so SRR0/1 */
828 mtmsrd r0 /* don't get trashed */
829
830 LOAD_REG_ADDR(r4, rtas)
831 ld r5,RTASENTRY(r4) /* get the rtas->entry value */
832 ld r4,RTASBASE(r4) /* get the rtas->base value */
833
834 mtspr SPRN_SRR0,r5
835 mtspr SPRN_SRR1,r6
836 rfid
837 b . /* prevent speculative execution */
838
839 _STATIC(rtas_return_loc)
840 /* relocation is off at this point */
841 mfspr r4,SPRN_SPRG_PACA /* Get PACA */
842 clrldi r4,r4,2 /* convert to realmode address */
843
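/* Use the bcl/mflr idiom to get the current address, then load the
 * 64-bit address of rtas_restore_regs from the literal at 1: below */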
844 bcl 20,31,$+4
845 0: mflr r3
846 ld r3,(1f-0b)(r3) /* get &.rtas_restore_regs */
847
848 mfmsr r6
849 li r0,MSR_RI
850 andc r6,r6,r0
851 sync
852 mtmsrd r6
853
854 ld r1,PACAR1(r4) /* Restore our SP */
855 ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
856
857 mtspr SPRN_SRR0,r3
858 mtspr SPRN_SRR1,r4
859 rfid
860 b . /* prevent speculative execution */
861
862 .align 3
863 1: .llong .rtas_restore_regs
864
865 _STATIC(rtas_restore_regs)
866 /* relocation is on at this point */
867 REST_GPR(2, r1) /* Restore the TOC */
868 REST_GPR(13, r1) /* Restore paca */
869 REST_8GPRS(14, r1) /* Restore the non-volatiles */
870 REST_10GPRS(22, r1) /* ditto */
871
872 mfspr r13,SPRN_SPRG_PACA
873
874 ld r4,_CCR(r1)
875 mtcr r4
876 ld r5,_CTR(r1)
877 mtctr r5
878 ld r6,_XER(r1)
879 mtspr SPRN_XER,r6
880 ld r7,_DAR(r1)
881 mtdar r7
882 ld r8,_DSISR(r1)
883 mtdsisr r8
884
885 addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
886 ld r0,16(r1) /* get return address */
887
888 mtlr r0
889 blr /* return to caller */
890
891 #endif /* CONFIG_PPC_RTAS */
892
893 _GLOBAL(enter_prom)
894 mflr r0
895 std r0,16(r1)
896 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
897
898 /* Because PROM is running in 32b mode, it clobbers the high order half
899 * of all registers that it saves. We therefore save those registers
900 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
901 */
902 SAVE_GPR(2, r1)
903 SAVE_GPR(13, r1)
904 SAVE_8GPRS(14, r1)
905 SAVE_10GPRS(22, r1)
906 mfcr r10
907 mfmsr r11
908 std r10,_CCR(r1)
909 std r11,_MSR(r1)
910
911 /* Get the PROM entrypoint */
912 mtlr r4
913
914 /* Switch MSR to 32 bits mode
915 */
916 #ifdef CONFIG_PPC_BOOK3E
917 rlwinm r11,r11,0,1,31
918 mtmsr r11
919 #else /* CONFIG_PPC_BOOK3E */
920 mfmsr r11
921 li r12,1
922 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
923 andc r11,r11,r12
924 li r12,1
925 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
926 andc r11,r11,r12
927 mtmsrd r11
928 #endif /* CONFIG_PPC_BOOK3E */
929 isync
930
931 /* Enter PROM here... */
932 blrl
933
934 /* Just make sure that the top 32 bits of r1 didn't get
935 * corrupted by OF
936 */
937 rldicl r1,r1,0,32
938
939 /* Restore the MSR (back to 64 bits) */
940 ld r0,_MSR(r1)
941 MTMSRD(r0)
942 isync
943
944 /* Restore other registers */
945 REST_GPR(2, r1)
946 REST_GPR(13, r1)
947 REST_8GPRS(14, r1)
948 REST_10GPRS(22, r1)
949 ld r4,_CCR(r1)
950 mtcr r4
951
952 addi r1,r1,PROM_FRAME_SIZE
953 ld r0,16(r1)
954 mtlr r0
955 blr
956
957 #ifdef CONFIG_FUNCTION_TRACER
958 #ifdef CONFIG_DYNAMIC_FTRACE
959 _GLOBAL(mcount)
960 _GLOBAL(_mcount)
961 blr
962
963 _GLOBAL(ftrace_caller)
964 /* Taken from output of objdump from lib64/glibc */
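/* LR holds the address in the traced function just after the mcount
 * call; 0(r1) is the back chain to the caller's frame and the LR save
 * slot 16 bytes into that frame holds the traced function's return
 * address, which becomes the parent ip in r4 */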
965 mflr r3
966 ld r11, 0(r1)
967 stdu r1, -112(r1)
968 std r3, 128(r1)
969 ld r4, 16(r11)
970 subi r3, r3, MCOUNT_INSN_SIZE
971 .globl ftrace_call
972 ftrace_call:
973 bl ftrace_stub
974 nop
975 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
976 .globl ftrace_graph_call
977 ftrace_graph_call:
978 b ftrace_graph_stub
979 _GLOBAL(ftrace_graph_stub)
980 #endif
981 ld r0, 128(r1)
982 mtlr r0
983 addi r1, r1, 112
984 _GLOBAL(ftrace_stub)
985 blr
986 #else
987 _GLOBAL(mcount)
988 blr
989
990 _GLOBAL(_mcount)
991 /* Taken from output of objdump from lib64/glibc */
992 mflr r3
993 ld r11, 0(r1)
994 stdu r1, -112(r1)
995 std r3, 128(r1)
996 ld r4, 16(r11)
997
998 subi r3, r3, MCOUNT_INSN_SIZE
999 LOAD_REG_ADDR(r5,ftrace_trace_function)
1000 ld r5,0(r5)
1001 ld r5,0(r5)
1002 mtctr r5
1003 bctrl
1004 nop
1005
1006
1007 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1008 b ftrace_graph_caller
1009 #endif
1010 ld r0, 128(r1)
1011 mtlr r0
1012 addi r1, r1, 112
1013 _GLOBAL(ftrace_stub)
1014 blr
1015
1016 #endif /* CONFIG_DYNAMIC_FTRACE */
1017
1018 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1019 _GLOBAL(ftrace_graph_caller)
1020 /* load r4 with local address */
1021 ld r4, 128(r1)
1022 subi r4, r4, MCOUNT_INSN_SIZE
1023
1024 /* get the parent address */
1025 ld r11, 112(r1)
1026 addi r3, r11, 16
1027
1028 bl .prepare_ftrace_return
1029 nop
1030
1031 ld r0, 128(r1)
1032 mtlr r0
1033 addi r1, r1, 112
1034 blr
1035
1036 _GLOBAL(return_to_handler)
1037 /* need to save return values */
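/* r3/r4 (the return values) and r31 are stored just below the current
 * SP; the stdu below makes that area part of this frame, and they are
 * reloaded after the old SP is recovered from the back chain */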
1038 std r4, -24(r1)
1039 std r3, -16(r1)
1040 std r31, -8(r1)
1041 mr r31, r1
1042 stdu r1, -112(r1)
1043
1044 bl .ftrace_return_to_handler
1045 nop
1046
1047 /* return value has real return address */
1048 mtlr r3
1049
1050 ld r1, 0(r1)
1051 ld r4, -24(r1)
1052 ld r3, -16(r1)
1053 ld r31, -8(r1)
1054
1055 /* Jump back to real return address */
1056 blr
1057
1058 _GLOBAL(mod_return_to_handler)
1059 /* need to save return values */
1060 std r4, -32(r1)
1061 std r3, -24(r1)
1062 /* save TOC */
1063 std r2, -16(r1)
1064 std r31, -8(r1)
1065 mr r31, r1
1066 stdu r1, -112(r1)
1067
1068 /*
1069 * We are in a module using the module's TOC.
1070 * Switch to our TOC to run inside the core kernel.
1071 */
1072 ld r2, PACATOC(r13)
1073
1074 bl .ftrace_return_to_handler
1075 nop
1076
1077 /* return value has real return address */
1078 mtlr r3
1079
1080 ld r1, 0(r1)
1081 ld r4, -32(r1)
1082 ld r3, -24(r1)
1083 ld r2, -16(r1)
1084 ld r31, -8(r1)
1085
1086 /* Jump back to real return address */
1087 blr
1088 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1089 #endif /* CONFIG_FUNCTION_TRACER */