1 /*
2 * This file contains the 64-bit "server" PowerPC variant
3 * of the low level exception handling including exception
4 * vectors, exception return, part of the slb and stab
5 * handling and other fixed offset specific things.
6 *
7 * This file is meant to be #included from head_64.S due to
8 * position dependent assembly.
9 *
10 * Most of this originates from head_64.S and thus has the same
11 * copyright history.
12 *
13 */
14
15 #include <asm/hw_irq.h>
16 #include <asm/exception-64s.h>
17 #include <asm/ptrace.h>
18 #include <asm/cpuidle.h>
19
20 /*
21 * We layout physical memory as follows:
22 * 0x0000 - 0x00ff : Secondary processor spin code
23 * 0x0100 - 0x17ff : pSeries Interrupt prologs
24 * 0x1800 - 0x4000 : interrupt support common interrupt prologs
25 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
26 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
27 * 0x7000 - 0x7fff : FWNMI data area
28 * 0x8000 - 0x8fff : Initial (CPU0) segment table
29 * 0x9000 - : Early init and support code
30 */
31 /* Syscall routine is used twice, in reloc-off and reloc-on paths */
32 #define SYSCALL_PSERIES_1 \
33 BEGIN_FTR_SECTION \
34 cmpdi r0,0x1ebe ; \
35 beq- 1f ; \
36 END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \
37 mr r9,r13 ; \
38 GET_PACA(r13) ; \
39 mfspr r11,SPRN_SRR0 ; \
40 0:
41
42 #define SYSCALL_PSERIES_2_RFID \
43 mfspr r12,SPRN_SRR1 ; \
44 ld r10,PACAKBASE(r13) ; \
45 LOAD_HANDLER(r10, system_call_entry) ; \
46 mtspr SPRN_SRR0,r10 ; \
47 ld r10,PACAKMSR(r13) ; \
48 mtspr SPRN_SRR1,r10 ; \
49 rfid ; \
50 b . ; /* prevent speculative execution */
51
52 #define SYSCALL_PSERIES_3 \
53 /* Fast LE/BE switch system call */ \
54 1: mfspr r12,SPRN_SRR1 ; \
55 xori r12,r12,MSR_LE ; \
56 mtspr SPRN_SRR1,r12 ; \
57 rfid ; /* return to userspace */ \
58 b . ; /* prevent speculative execution */
59
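/*
 * Note on the magic constant above: 0x1ebe is the "fast" endian-switch
 * system call. When CPU_FTR_REAL_LE is set, SYSCALL_PSERIES_1 diverts it
 * to the 1: label provided by SYSCALL_PSERIES_3, which simply flips
 * MSR_LE in SRR1 and rfid's straight back to userspace without entering
 * the kernel proper.
 */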
60 #if defined(CONFIG_RELOCATABLE)
61 /*
62 * We can't branch directly so we do it via the CTR which
63 * is volatile across system calls.
64 */
65 #define SYSCALL_PSERIES_2_DIRECT \
66 mflr r10 ; \
67 ld r12,PACAKBASE(r13) ; \
68 LOAD_HANDLER(r12, system_call_entry) ; \
69 mtctr r12 ; \
70 mfspr r12,SPRN_SRR1 ; \
71 /* Re-use of r13... No spare regs to do this */ \
72 li r13,MSR_RI ; \
73 mtmsrd r13,1 ; \
74 GET_PACA(r13) ; /* get r13 back */ \
75 bctr ;
76 #else
77 /* We can branch directly */
78 #define SYSCALL_PSERIES_2_DIRECT \
79 mfspr r12,SPRN_SRR1 ; \
80 li r10,MSR_RI ; \
81 mtmsrd r10,1 ; /* Set RI (EE=0) */ \
82 b system_call_common ;
83 #endif
84
85 /*
86 * This is the start of the interrupt handlers for pSeries
87 * This code runs with relocation off.
88 * Code from here to __end_interrupts gets copied down to real
89 * address 0x100 when we are running a relocatable kernel.
90 * Therefore any relative branches in this section must only
91 * branch to labels in this section.
92 */
93 . = 0x100
94 .globl __start_interrupts
95 __start_interrupts:
96
97 .globl system_reset_pSeries;
98 system_reset_pSeries:
99 SET_SCRATCH0(r13)
100 #ifdef CONFIG_PPC_P7_NAP
101 BEGIN_FTR_SECTION
102 /* Running native on arch 2.06 or later, check if we are
103 * waking up from nap/sleep/winkle.
104 */
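/*
 * The rlwinm below extracts SRR1 bits 46:47 (the wakeup reason): zero
 * means we did not wake from a power-saving state, and the value is
 * compared against 2 (result kept in cr3) to choose between
 * pnv_wakeup_noloss (< 2) and pnv_wakeup_loss (>= 2) further down.
 */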
105 mfspr r13,SPRN_SRR1
106 rlwinm. r13,r13,47-31,30,31
107 beq 9f
108
109 cmpwi cr3,r13,2
110 GET_PACA(r13)
111 bl pnv_restore_hyp_resource
112
113 li r0,PNV_THREAD_RUNNING
114 stb r0,PACA_THREAD_IDLE_STATE(r13) /* Clear thread state */
115
116 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
117 li r0,KVM_HWTHREAD_IN_KERNEL
118 stb r0,HSTATE_HWTHREAD_STATE(r13)
119 /* Order setting hwthread_state vs. testing hwthread_req */
120 sync
121 lbz r0,HSTATE_HWTHREAD_REQ(r13)
122 cmpwi r0,0
123 beq 1f
124 b kvm_start_guest
125 1:
126 #endif
127
128 /* Return SRR1 from power7_nap() */
129 mfspr r3,SPRN_SRR1
130 blt cr3,2f
131 b pnv_wakeup_loss
132 2: b pnv_wakeup_noloss
133
134 9:
135 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
136 #endif /* CONFIG_PPC_P7_NAP */
137 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
138 NOTEST, 0x100)
139
140 . = 0x200
141 machine_check_pSeries_1:
142 /* This is moved out of line as it can be patched by FW, but
143 * some code path might still want to branch into the original
144 * vector
145 */
146 SET_SCRATCH0(r13) /* save r13 */
147 /*
148 * Running native on arch 2.06 or later, we may wake up from winkle
149 * inside a machine check. If so, then the last bit of HSPRG0 would be set
150 * to 1. Hence clear it unconditionally.
151 */
152 GET_PACA(r13)
153 clrrdi r13,r13,1
154 SET_PACA(r13)
155 EXCEPTION_PROLOG_0(PACA_EXMC)
156 BEGIN_FTR_SECTION
157 b machine_check_powernv_early
158 FTR_SECTION_ELSE
159 b machine_check_pSeries_0
160 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
161
162 . = 0x300
163 .globl data_access_pSeries
164 data_access_pSeries:
165 SET_SCRATCH0(r13)
166 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
167 KVMTEST, 0x300)
168
169 . = 0x380
170 .globl data_access_slb_pSeries
171 data_access_slb_pSeries:
172 SET_SCRATCH0(r13)
173 EXCEPTION_PROLOG_0(PACA_EXSLB)
174 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
175 std r3,PACA_EXSLB+EX_R3(r13)
176 mfspr r3,SPRN_DAR
177 mfspr r12,SPRN_SRR1
178 #ifndef CONFIG_RELOCATABLE
179 b slb_miss_realmode
180 #else
181 /*
182 * We can't just use a direct branch to slb_miss_realmode
183 * because the distance from here to there depends on where
184 * the kernel ends up being put.
185 */
186 mfctr r11
187 ld r10,PACAKBASE(r13)
188 LOAD_HANDLER(r10, slb_miss_realmode)
189 mtctr r10
190 bctr
191 #endif
192
193 STD_EXCEPTION_PSERIES(0x400, instruction_access)
194
195 . = 0x480
196 .globl instruction_access_slb_pSeries
197 instruction_access_slb_pSeries:
198 SET_SCRATCH0(r13)
199 EXCEPTION_PROLOG_0(PACA_EXSLB)
200 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x480)
201 std r3,PACA_EXSLB+EX_R3(r13)
202 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
203 mfspr r12,SPRN_SRR1
204 #ifndef CONFIG_RELOCATABLE
205 b slb_miss_realmode
206 #else
207 mfctr r11
208 ld r10,PACAKBASE(r13)
209 LOAD_HANDLER(r10, slb_miss_realmode)
210 mtctr r10
211 bctr
212 #endif
213
214 /* We open code these as we can't have a ". = x" (even with
215 * x = ".") within a feature section
216 */
217 . = 0x500;
218 .globl hardware_interrupt_pSeries;
219 .globl hardware_interrupt_hv;
220 hardware_interrupt_pSeries:
221 hardware_interrupt_hv:
222 BEGIN_FTR_SECTION
223 _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
224 EXC_HV, SOFTEN_TEST_HV)
225 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
226 FTR_SECTION_ELSE
227 _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
228 EXC_STD, SOFTEN_TEST_PR)
229 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
230 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
231
232 STD_EXCEPTION_PSERIES(0x600, alignment)
233 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x600)
234
235 STD_EXCEPTION_PSERIES(0x700, program_check)
236 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x700)
237
238 STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
239 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x800)
240
241 . = 0x900
242 .globl decrementer_pSeries
243 decrementer_pSeries:
244 _MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)
245
246 STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
247
248 MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
249 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xa00)
250
251 STD_EXCEPTION_PSERIES(0xb00, trap_0b)
252 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xb00)
253
254 . = 0xc00
255 .globl system_call_pSeries
256 system_call_pSeries:
257 /*
258 * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems
259 * that support it) before changing to HMT_MEDIUM. That allows the KVM
260 * code to save that value into the guest state (it is the guest's PPR
261 * value). Otherwise just change to HMT_MEDIUM as userspace has
262 * already saved the PPR.
263 */
264 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
265 SET_SCRATCH0(r13)
266 GET_PACA(r13)
267 std r9,PACA_EXGEN+EX_R9(r13)
268 OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);
269 HMT_MEDIUM;
270 std r10,PACA_EXGEN+EX_R10(r13)
271 OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR);
272 mfcr r9
273 KVMTEST(0xc00)
274 GET_SCRATCH0(r13)
275 #else
276 HMT_MEDIUM;
277 #endif
278 SYSCALL_PSERIES_1
279 SYSCALL_PSERIES_2_RFID
280 SYSCALL_PSERIES_3
281 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
282
283 STD_EXCEPTION_PSERIES(0xd00, single_step)
284 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xd00)
285
286 /* At 0xe??? we have a bunch of hypervisor exceptions, we branch
287 * out of line to handle them
288 */
289 . = 0xe00
290 hv_data_storage_trampoline:
291 SET_SCRATCH0(r13)
292 EXCEPTION_PROLOG_0(PACA_EXGEN)
293 b h_data_storage_hv
294
295 . = 0xe20
296 hv_instr_storage_trampoline:
297 SET_SCRATCH0(r13)
298 EXCEPTION_PROLOG_0(PACA_EXGEN)
299 b h_instr_storage_hv
300
301 . = 0xe40
302 emulation_assist_trampoline:
303 SET_SCRATCH0(r13)
304 EXCEPTION_PROLOG_0(PACA_EXGEN)
305 b emulation_assist_hv
306
307 . = 0xe60
308 hv_exception_trampoline:
309 SET_SCRATCH0(r13)
310 EXCEPTION_PROLOG_0(PACA_EXGEN)
311 b hmi_exception_early
312
313 . = 0xe80
314 hv_doorbell_trampoline:
315 SET_SCRATCH0(r13)
316 EXCEPTION_PROLOG_0(PACA_EXGEN)
317 b h_doorbell_hv
318
319 . = 0xea0
320 hv_virt_irq_trampoline:
321 SET_SCRATCH0(r13)
322 EXCEPTION_PROLOG_0(PACA_EXGEN)
323 b h_virt_irq_hv
324
325 /* We need to deal with the Altivec unavailable exception
326 * here which is at 0xf20, thus in the middle of the
327 * prolog code of the PerformanceMonitor one. A little
328 * trickery is thus necessary
329 */
330 . = 0xf00
331 performance_monitor_pseries_trampoline:
332 SET_SCRATCH0(r13)
333 EXCEPTION_PROLOG_0(PACA_EXGEN)
334 b performance_monitor_pSeries
335
336 . = 0xf20
337 altivec_unavailable_pseries_trampoline:
338 SET_SCRATCH0(r13)
339 EXCEPTION_PROLOG_0(PACA_EXGEN)
340 b altivec_unavailable_pSeries
341
342 . = 0xf40
343 vsx_unavailable_pseries_trampoline:
344 SET_SCRATCH0(r13)
345 EXCEPTION_PROLOG_0(PACA_EXGEN)
346 b vsx_unavailable_pSeries
347
348 . = 0xf60
349 facility_unavailable_trampoline:
350 SET_SCRATCH0(r13)
351 EXCEPTION_PROLOG_0(PACA_EXGEN)
352 b facility_unavailable_pSeries
353
354 . = 0xf80
355 hv_facility_unavailable_trampoline:
356 SET_SCRATCH0(r13)
357 EXCEPTION_PROLOG_0(PACA_EXGEN)
358 b facility_unavailable_hv
359
360 #ifdef CONFIG_CBE_RAS
361 STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
362 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
363 #endif /* CONFIG_CBE_RAS */
364
365 STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
366 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
367
368 . = 0x1500
369 .global denorm_exception_hv
370 denorm_exception_hv:
371 mtspr SPRN_SPRG_HSCRATCH0,r13
372 EXCEPTION_PROLOG_0(PACA_EXGEN)
373 EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
374
375 #ifdef CONFIG_PPC_DENORMALISATION
376 mfspr r10,SPRN_HSRR1
377 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
378 andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
379 addi r11,r11,-4 /* HSRR0 is next instruction */
380 bne+ denorm_assist
381 #endif
382
383 KVMTEST(0x1500)
384 EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
385 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
386
387 #ifdef CONFIG_CBE_RAS
388 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
389 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
390 #endif /* CONFIG_CBE_RAS */
391
392 STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
393 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x1700)
394
395 #ifdef CONFIG_CBE_RAS
396 STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
397 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
398 #else
399 . = 0x1800
400 #endif /* CONFIG_CBE_RAS */
401
402
403 /*** Out of line interrupts support ***/
404
405 .align 7
406 /* moved from 0x200 */
407 machine_check_powernv_early:
408 BEGIN_FTR_SECTION
409 EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
410 /*
411 * Register contents:
412 * R13 = PACA
413 * R9 = CR
414 * Original R9 to R13 is saved on PACA_EXMC
415 *
416 * Switch to mc_emergency stack and handle re-entrancy (we limit
417 * the nested MCE up to level 4 to avoid stack overflow).
418 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
419 *
420 * We use paca->in_mce to check whether this is the first entry or
421 * nested machine check. We increment paca->in_mce to track nested
422 * machine checks.
423 *
424 * If this is the first entry then set stack pointer to
425 * paca->mc_emergency_sp, otherwise r1 is already pointing to
426 * stack frame on mc_emergency stack.
427 *
428 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
429 * checkstop if we get another machine check exception before we do
430 * rfid with MSR_ME=1.
431 */
432 mr r11,r1 /* Save r1 */
433 lhz r10,PACA_IN_MCE(r13)
434 cmpwi r10,0 /* Are we in a nested machine check? */
435 bne 0f /* Yes, we are. */
436 /* First machine check entry */
437 ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */
438 0: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
439 addi r10,r10,1 /* increment paca->in_mce */
440 sth r10,PACA_IN_MCE(r13)
441 /* Limit nested MCE to level 4 to avoid stack overflow */
442 cmpwi r10,4
443 bgt 2f /* Check if we hit limit of 4 */
444 std r11,GPR1(r1) /* Save r1 on the stack. */
445 std r11,0(r1) /* make stack chain pointer */
446 mfspr r11,SPRN_SRR0 /* Save SRR0 */
447 std r11,_NIP(r1)
448 mfspr r11,SPRN_SRR1 /* Save SRR1 */
449 std r11,_MSR(r1)
450 mfspr r11,SPRN_DAR /* Save DAR */
451 std r11,_DAR(r1)
452 mfspr r11,SPRN_DSISR /* Save DSISR */
453 std r11,_DSISR(r1)
454 std r9,_CCR(r1) /* Save CR in stackframe */
455 /* Save r9 through r13 from EXMC save area to stack frame. */
456 EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
457 mfmsr r11 /* get MSR value */
458 ori r11,r11,MSR_ME /* turn on ME bit */
459 ori r11,r11,MSR_RI /* turn on RI bit */
460 ld r12,PACAKBASE(r13) /* get high part of &label */
461 LOAD_HANDLER(r12, machine_check_handle_early)
462 1: mtspr SPRN_SRR0,r12
463 mtspr SPRN_SRR1,r11
464 rfid
465 b . /* prevent speculative execution */
466 2:
467 /* Stack overflow. Stay on emergency stack and panic.
468 * Keep the ME bit off while panicking, so that if we hit
469 * another machine check we checkstop.
470 */
471 addi r1,r1,INT_FRAME_SIZE /* go back to previous stack frame */
472 ld r11,PACAKMSR(r13)
473 ld r12,PACAKBASE(r13)
474 LOAD_HANDLER(r12, unrecover_mce)
475 li r10,MSR_ME
476 andc r11,r11,r10 /* Turn off MSR_ME */
477 b 1b
478 b . /* prevent speculative execution */
479 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
480
481 machine_check_pSeries:
482 .globl machine_check_fwnmi
483 machine_check_fwnmi:
484 SET_SCRATCH0(r13) /* save r13 */
485 EXCEPTION_PROLOG_0(PACA_EXMC)
486 machine_check_pSeries_0:
487 EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
488 /*
489 * The following is essentially EXCEPTION_PROLOG_PSERIES_1 with the
490 * difference that MSR_RI is not enabled, because PACA_EXMC is being
491 * used, so a nested machine check would corrupt it. machine_check_common
492 * enables MSR_RI.
493 */
494 ld r12,PACAKBASE(r13)
495 ld r10,PACAKMSR(r13)
496 xori r10,r10,MSR_RI
497 mfspr r11,SPRN_SRR0
498 LOAD_HANDLER(r12, machine_check_common)
499 mtspr SPRN_SRR0,r12
500 mfspr r12,SPRN_SRR1
501 mtspr SPRN_SRR1,r10
502 rfid
503 b . /* prevent speculative execution */
504
505 KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
506 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
507 KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
508 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x400)
509 KVM_HANDLER(PACA_EXSLB, EXC_STD, 0x480)
510 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x900)
511 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
512
513 #ifdef CONFIG_PPC_DENORMALISATION
514 denorm_assist:
515 BEGIN_FTR_SECTION
516 /*
517 * To denormalise we need to move a copy of the register to itself.
518 * For POWER6 do that here for all FP regs.
519 */
520 mfmsr r10
521 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
522 xori r10,r10,(MSR_FE0|MSR_FE1)
523 mtmsrd r10
524 sync
525
526 #define FMR2(n) fmr (n), (n) ; fmr n+1, n+1
527 #define FMR4(n) FMR2(n) ; FMR2(n+2)
528 #define FMR8(n) FMR4(n) ; FMR4(n+4)
529 #define FMR16(n) FMR8(n) ; FMR8(n+8)
530 #define FMR32(n) FMR16(n) ; FMR16(n+16)
531 FMR32(0)
532
533 FTR_SECTION_ELSE
534 /*
535 * To denormalise we need to move a copy of the register to itself.
536 * For POWER7 do that here for the first 32 VSX registers only.
537 */
538 mfmsr r10
539 oris r10,r10,MSR_VSX@h
540 mtmsrd r10
541 sync
542
543 #define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
544 #define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
545 #define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
546 #define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
547 #define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
548 XVCPSGNDP32(0)
549
550 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
551
552 BEGIN_FTR_SECTION
553 b denorm_done
554 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
555 /*
556 * To denormalise we need to move a copy of the register to itself.
557 * For POWER8 we need to do that for all 64 VSX registers
558 */
559 XVCPSGNDP32(32)
560 denorm_done:
561 mtspr SPRN_HSRR0,r11
562 mtcrf 0x80,r9
563 ld r9,PACA_EXGEN+EX_R9(r13)
564 RESTORE_PPR_PACA(PACA_EXGEN, r10)
565 BEGIN_FTR_SECTION
566 ld r10,PACA_EXGEN+EX_CFAR(r13)
567 mtspr SPRN_CFAR,r10
568 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
569 ld r10,PACA_EXGEN+EX_R10(r13)
570 ld r11,PACA_EXGEN+EX_R11(r13)
571 ld r12,PACA_EXGEN+EX_R12(r13)
572 ld r13,PACA_EXGEN+EX_R13(r13)
573 HRFID
574 b .
575 #endif
576
577 .align 7
578 /* moved from 0xe00 */
579 STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
580 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
581 STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
582 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
583 STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
584 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
585 MASKABLE_EXCEPTION_HV_OOL(0xe62, hmi_exception)
586 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
587
588 MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
589 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
590
591 MASKABLE_EXCEPTION_HV_OOL(0xea2, h_virt_irq)
592 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xea2)
593
594 /* moved from 0xf00 */
595 STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
596 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf00)
597 STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
598 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf20)
599 STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
600 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf40)
601 STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
602 KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf60)
603 STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
604 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
605
606 /*
607 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
608 * - If it was a decrementer interrupt, we bump the dec to max and return.
609 * - If it was a doorbell we return immediately since doorbells are edge
610 * triggered and won't automatically refire.
611 * - If it was a HMI we return immediately since we handled it in realmode
612 * and it won't refire.
613 * - else we hard disable and return.
614 * This is called with r10 containing the value to OR to the paca field.
615 */
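/*
 * The SRR1 update in the macro below clears MSR_EE without needing a
 * spare register for a mask: rldicl rotates the value so EE ends up in
 * the top bit and clears it, then rotldi rotates everything back into
 * place.
 */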
616 #define MASKED_INTERRUPT(_H) \
617 masked_##_H##interrupt: \
618 std r11,PACA_EXGEN+EX_R11(r13); \
619 lbz r11,PACAIRQHAPPENED(r13); \
620 or r11,r11,r10; \
621 stb r11,PACAIRQHAPPENED(r13); \
622 cmpwi r10,PACA_IRQ_DEC; \
623 bne 1f; \
624 lis r10,0x7fff; \
625 ori r10,r10,0xffff; \
626 mtspr SPRN_DEC,r10; \
627 b 2f; \
628 1: cmpwi r10,PACA_IRQ_DBELL; \
629 beq 2f; \
630 cmpwi r10,PACA_IRQ_HMI; \
631 beq 2f; \
632 mfspr r10,SPRN_##_H##SRR1; \
633 rldicl r10,r10,48,1; /* clear MSR_EE */ \
634 rotldi r10,r10,16; \
635 mtspr SPRN_##_H##SRR1,r10; \
636 2: mtcrf 0x80,r9; \
637 ld r9,PACA_EXGEN+EX_R9(r13); \
638 ld r10,PACA_EXGEN+EX_R10(r13); \
639 ld r11,PACA_EXGEN+EX_R11(r13); \
640 GET_SCRATCH0(r13); \
641 ##_H##rfid; \
642 b .
643
644 MASKED_INTERRUPT()
645 MASKED_INTERRUPT(H)
646
647 /*
648 * Called from arch_local_irq_enable when an interrupt needs
649 * to be resent. r3 contains 0x500, 0x900, 0xa00, 0xe60, 0xe80 or 0xea0
650 * to indicate which kind of interrupt. MSR:EE is already off. We
651 * generate a stackframe as if a real interrupt had happened.
652 *
653 * Note: While MSR:EE is off, we need to make sure that _MSR
654 * in the generated frame has EE set to 1 or the exception
655 * handler will not properly re-enable interrupts.
656 */
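/*
 * For example, a decrementer that fired while soft-disabled is
 * replayed by calling this with r3 = 0x900; the cmpwi chain below
 * then branches straight to decrementer_common.
 */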
657 _GLOBAL(__replay_interrupt)
658 /* We are going to jump to the exception common code which
659 * will retrieve various register values from the PACA which
660 * we don't give a damn about, so we don't bother storing them.
661 */
662 mfmsr r12
663 mflr r11
664 mfcr r9
665 ori r12,r12,MSR_EE
666 cmpwi r3,0x900
667 beq decrementer_common
668 cmpwi r3,0x500
669 beq hardware_interrupt_common
670 BEGIN_FTR_SECTION
671 cmpwi r3,0xe80
672 beq h_doorbell_common
673 cmpwi r3,0xea0
674 beq h_virt_irq_common
675 cmpwi r3,0xe60
676 beq hmi_exception_common
677 FTR_SECTION_ELSE
678 cmpwi r3,0xa00
679 beq doorbell_super_common
680 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
681 blr
682
683 #ifdef CONFIG_PPC_PSERIES
684 /*
685 * Vectors for the FWNMI option. Share common code.
686 */
687 .globl system_reset_fwnmi
688 .align 7
689 system_reset_fwnmi:
690 SET_SCRATCH0(r13) /* save r13 */
691 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
692 NOTEST, 0x100)
693
694 #endif /* CONFIG_PPC_PSERIES */
695
696 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
697 kvmppc_skip_interrupt:
698 /*
699 * Here all GPRs are unchanged from when the interrupt happened
700 * except for r13, which is saved in SPRG_SCRATCH0.
701 */
702 mfspr r13, SPRN_SRR0
703 addi r13, r13, 4
704 mtspr SPRN_SRR0, r13
705 GET_SCRATCH0(r13)
706 rfid
707 b .
708
709 kvmppc_skip_Hinterrupt:
710 /*
711 * Here all GPRs are unchanged from when the interrupt happened
712 * except for r13, which is saved in SPRG_SCRATCH0.
713 */
714 mfspr r13, SPRN_HSRR0
715 addi r13, r13, 4
716 mtspr SPRN_HSRR0, r13
717 GET_SCRATCH0(r13)
718 hrfid
719 b .
720 #endif
721
722 /*
723 * Ensure that any handlers that get invoked from the exception prologs
724 * above are below the first 64KB (0x10000) of the kernel image because
725 * the prologs assemble the addresses of these handlers using the
726 * LOAD_HANDLER macro, which uses an ori instruction.
727 */
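/*
 * (LOAD_HANDLER, from asm/exception-64s.h, is handed a register that the
 * callers above have already loaded with PACAKBASE and ORs in the
 * handler's offset from the kernel base; since a single 16-bit ori
 * immediate is all that is used, the handler must sit within that
 * first 64KB.)
 */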
728
729 /*** Common interrupt handlers ***/
730
731 STD_EXCEPTION_COMMON(0x100, system_reset, system_reset_exception)
732
733 STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
734 STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, timer_interrupt)
735 STD_EXCEPTION_COMMON(0x980, hdecrementer, hdec_interrupt)
736 #ifdef CONFIG_PPC_DOORBELL
737 STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, doorbell_exception)
738 #else
739 STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, unknown_exception)
740 #endif
741 STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception)
742 STD_EXCEPTION_COMMON(0xd00, single_step, single_step_exception)
743 STD_EXCEPTION_COMMON(0xe00, trap_0e, unknown_exception)
744 STD_EXCEPTION_COMMON(0xe40, emulation_assist, emulation_assist_interrupt)
745 STD_EXCEPTION_COMMON_ASYNC(0xe60, hmi_exception, handle_hmi_exception)
746 #ifdef CONFIG_PPC_DOORBELL
747 STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, doorbell_exception)
748 #else
749 STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception)
750 #endif
751 STD_EXCEPTION_COMMON_ASYNC(0xea0, h_virt_irq, do_IRQ)
752 STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, performance_monitor_exception)
753 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception)
754 STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception)
755 #ifdef CONFIG_ALTIVEC
756 STD_EXCEPTION_COMMON(0x1700, altivec_assist, altivec_assist_exception)
757 #else
758 STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception)
759 #endif
760
761 /*
762 * Relocation-on interrupts: A subset of the interrupts can be delivered
763 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
764 * them. Addresses are the same as the original interrupt addresses, but
765 * offset by 0xc000000000004000.
766 * It's impossible to receive interrupts below 0x300 via this mechanism.
767 * KVM: None of these traps are from the guest; anything that escalated
768 * to HV=1 from HV=0 is delivered via real mode handlers.
769 */
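/*
 * For example, the relocation-on image of the 0x900 decrementer lives
 * at 0xc000000000004900 and is wired up by the
 * MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer) entry
 * further down.
 */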
770
771 /*
772 * This uses the standard macro, since the original 0x300 vector
773 * only has extra guff for STAB-based processors -- which never
774 * come here.
775 */
776 STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
777 . = 0x4380
778 .globl data_access_slb_relon_pSeries
779 data_access_slb_relon_pSeries:
780 SET_SCRATCH0(r13)
781 EXCEPTION_PROLOG_0(PACA_EXSLB)
782 EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
783 std r3,PACA_EXSLB+EX_R3(r13)
784 mfspr r3,SPRN_DAR
785 mfspr r12,SPRN_SRR1
786 #ifndef CONFIG_RELOCATABLE
787 b slb_miss_realmode
788 #else
789 /*
790 * We can't just use a direct branch to slb_miss_realmode
791 * because the distance from here to there depends on where
792 * the kernel ends up being put.
793 */
794 mfctr r11
795 ld r10,PACAKBASE(r13)
796 LOAD_HANDLER(r10, slb_miss_realmode)
797 mtctr r10
798 bctr
799 #endif
800
801 STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
802 . = 0x4480
803 .globl instruction_access_slb_relon_pSeries
804 instruction_access_slb_relon_pSeries:
805 SET_SCRATCH0(r13)
806 EXCEPTION_PROLOG_0(PACA_EXSLB)
807 EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
808 std r3,PACA_EXSLB+EX_R3(r13)
809 mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
810 mfspr r12,SPRN_SRR1
811 #ifndef CONFIG_RELOCATABLE
812 b slb_miss_realmode
813 #else
814 mfctr r11
815 ld r10,PACAKBASE(r13)
816 LOAD_HANDLER(r10, slb_miss_realmode)
817 mtctr r10
818 bctr
819 #endif
820
821 . = 0x4500
822 .globl hardware_interrupt_relon_pSeries;
823 .globl hardware_interrupt_relon_hv;
824 hardware_interrupt_relon_pSeries:
825 hardware_interrupt_relon_hv:
826 BEGIN_FTR_SECTION
827 _MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
828 FTR_SECTION_ELSE
829 _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
830 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
831 STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
832 STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
833 STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
834 MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
835 STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
836 MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
837 STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
838
839 . = 0x4c00
840 .globl system_call_relon_pSeries
841 system_call_relon_pSeries:
842 HMT_MEDIUM
843 SYSCALL_PSERIES_1
844 SYSCALL_PSERIES_2_DIRECT
845 SYSCALL_PSERIES_3
846
847 STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
848
849 . = 0x4e00
850 b . /* Can't happen, see v2.07 Book III-S section 6.5 */
851
852 . = 0x4e20
853 b . /* Can't happen, see v2.07 Book III-S section 6.5 */
854
855 . = 0x4e40
856 emulation_assist_relon_trampoline:
857 SET_SCRATCH0(r13)
858 EXCEPTION_PROLOG_0(PACA_EXGEN)
859 b emulation_assist_relon_hv
860
861 . = 0x4e60
862 b . /* Can't happen, see v2.07 Book III-S section 6.5 */
863
864 . = 0x4e80
865 h_doorbell_relon_trampoline:
866 SET_SCRATCH0(r13)
867 EXCEPTION_PROLOG_0(PACA_EXGEN)
868 b h_doorbell_relon_hv
869
870 . = 0x4ea0
871 h_virt_irq_relon_trampoline:
872 SET_SCRATCH0(r13)
873 EXCEPTION_PROLOG_0(PACA_EXGEN)
874 b h_virt_irq_relon_hv
875
876 . = 0x4f00
877 performance_monitor_relon_pseries_trampoline:
878 SET_SCRATCH0(r13)
879 EXCEPTION_PROLOG_0(PACA_EXGEN)
880 b performance_monitor_relon_pSeries
881
882 . = 0x4f20
883 altivec_unavailable_relon_pseries_trampoline:
884 SET_SCRATCH0(r13)
885 EXCEPTION_PROLOG_0(PACA_EXGEN)
886 b altivec_unavailable_relon_pSeries
887
888 . = 0x4f40
889 vsx_unavailable_relon_pseries_trampoline:
890 SET_SCRATCH0(r13)
891 EXCEPTION_PROLOG_0(PACA_EXGEN)
892 b vsx_unavailable_relon_pSeries
893
894 . = 0x4f60
895 facility_unavailable_relon_trampoline:
896 SET_SCRATCH0(r13)
897 EXCEPTION_PROLOG_0(PACA_EXGEN)
898 b facility_unavailable_relon_pSeries
899
900 . = 0x4f80
901 hv_facility_unavailable_relon_trampoline:
902 SET_SCRATCH0(r13)
903 EXCEPTION_PROLOG_0(PACA_EXGEN)
904 b hv_facility_unavailable_relon_hv
905
906 STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
907 #ifdef CONFIG_PPC_DENORMALISATION
908 . = 0x5500
909 b denorm_exception_hv
910 #endif
911 STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
912
913 .align 7
914 system_call_entry:
915 b system_call_common
916
917 ppc64_runlatch_on_trampoline:
918 b __ppc64_runlatch_on
919
920 /*
921 * Here r13 points to the paca, r9 contains the saved CR,
922 * SRR0 and SRR1 are saved in r11 and r12,
923 * r9 - r13 are saved in paca->exgen.
924 */
925 .align 7
926 .globl data_access_common
927 data_access_common:
928 mfspr r10,SPRN_DAR
929 std r10,PACA_EXGEN+EX_DAR(r13)
930 mfspr r10,SPRN_DSISR
931 stw r10,PACA_EXGEN+EX_DSISR(r13)
932 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
933 RECONCILE_IRQ_STATE(r10, r11)
934 ld r12,_MSR(r1)
935 ld r3,PACA_EXGEN+EX_DAR(r13)
936 lwz r4,PACA_EXGEN+EX_DSISR(r13)
937 li r5,0x300
938 std r3,_DAR(r1)
939 std r4,_DSISR(r1)
940 BEGIN_MMU_FTR_SECTION
941 b do_hash_page /* Try to handle as hpte fault */
942 MMU_FTR_SECTION_ELSE
943 b handle_page_fault
944 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
945
946 .align 7
947 .globl h_data_storage_common
948 h_data_storage_common:
949 mfspr r10,SPRN_HDAR
950 std r10,PACA_EXGEN+EX_DAR(r13)
951 mfspr r10,SPRN_HDSISR
952 stw r10,PACA_EXGEN+EX_DSISR(r13)
953 EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
954 bl save_nvgprs
955 RECONCILE_IRQ_STATE(r10, r11)
956 addi r3,r1,STACK_FRAME_OVERHEAD
957 bl unknown_exception
958 b ret_from_except
959
960 .align 7
961 .globl instruction_access_common
962 instruction_access_common:
963 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
964 RECONCILE_IRQ_STATE(r10, r11)
965 ld r12,_MSR(r1)
966 ld r3,_NIP(r1)
967 andis. r4,r12,0x5820
968 li r5,0x400
969 std r3,_DAR(r1)
970 std r4,_DSISR(r1)
971 BEGIN_MMU_FTR_SECTION
972 b do_hash_page /* Try to handle as hpte fault */
973 MMU_FTR_SECTION_ELSE
974 b handle_page_fault
975 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
976
977 STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)
978
979 /*
980 * Machine check is different because we use a different
981 * save area: PACA_EXMC instead of PACA_EXGEN.
982 */
983 .align 7
984 .globl machine_check_common
985 machine_check_common:
986
987 mfspr r10,SPRN_DAR
988 std r10,PACA_EXMC+EX_DAR(r13)
989 mfspr r10,SPRN_DSISR
990 stw r10,PACA_EXMC+EX_DSISR(r13)
991 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
992 FINISH_NAP
993 RECONCILE_IRQ_STATE(r10, r11)
994 ld r3,PACA_EXMC+EX_DAR(r13)
995 lwz r4,PACA_EXMC+EX_DSISR(r13)
996 /* Enable MSR_RI when finished with PACA_EXMC */
997 li r10,MSR_RI
998 mtmsrd r10,1
999 std r3,_DAR(r1)
1000 std r4,_DSISR(r1)
1001 bl save_nvgprs
1002 addi r3,r1,STACK_FRAME_OVERHEAD
1003 bl machine_check_exception
1004 b ret_from_except
1005
1006 .align 7
1007 .globl alignment_common
1008 alignment_common:
1009 mfspr r10,SPRN_DAR
1010 std r10,PACA_EXGEN+EX_DAR(r13)
1011 mfspr r10,SPRN_DSISR
1012 stw r10,PACA_EXGEN+EX_DSISR(r13)
1013 EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
1014 ld r3,PACA_EXGEN+EX_DAR(r13)
1015 lwz r4,PACA_EXGEN+EX_DSISR(r13)
1016 std r3,_DAR(r1)
1017 std r4,_DSISR(r1)
1018 bl save_nvgprs
1019 RECONCILE_IRQ_STATE(r10, r11)
1020 addi r3,r1,STACK_FRAME_OVERHEAD
1021 bl alignment_exception
1022 b ret_from_except
1023
1024 .align 7
1025 .globl program_check_common
1026 program_check_common:
1027 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
1028 bl save_nvgprs
1029 RECONCILE_IRQ_STATE(r10, r11)
1030 addi r3,r1,STACK_FRAME_OVERHEAD
1031 bl program_check_exception
1032 b ret_from_except
1033
1034 .align 7
1035 .globl fp_unavailable_common
1036 fp_unavailable_common:
1037 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
1038 bne 1f /* if from user, just load it up */
1039 bl save_nvgprs
1040 RECONCILE_IRQ_STATE(r10, r11)
1041 addi r3,r1,STACK_FRAME_OVERHEAD
1042 bl kernel_fp_unavailable_exception
1043 BUG_OPCODE
1044 1:
1045 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1046 BEGIN_FTR_SECTION
1047 /* Test if 2 TM state bits are zero. If non-zero (i.e. userspace was
1048 * in a transaction), go do TM stuff
1049 */
1050 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1051 bne- 2f
1052 END_FTR_SECTION_IFSET(CPU_FTR_TM)
1053 #endif
1054 bl load_up_fpu
1055 b fast_exception_return
1056 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1057 2: /* User process was in a transaction */
1058 bl save_nvgprs
1059 RECONCILE_IRQ_STATE(r10, r11)
1060 addi r3,r1,STACK_FRAME_OVERHEAD
1061 bl fp_unavailable_tm
1062 b ret_from_except
1063 #endif
1064 .align 7
1065 .globl altivec_unavailable_common
1066 altivec_unavailable_common:
1067 EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
1068 #ifdef CONFIG_ALTIVEC
1069 BEGIN_FTR_SECTION
1070 beq 1f
1071 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1072 BEGIN_FTR_SECTION_NESTED(69)
1073 /* Test if 2 TM state bits are zero. If non-zero (i.e. userspace was
1074 * in a transaction), go do TM stuff
1075 */
1076 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1077 bne- 2f
1078 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1079 #endif
1080 bl load_up_altivec
1081 b fast_exception_return
1082 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1083 2: /* User process was in a transaction */
1084 bl save_nvgprs
1085 RECONCILE_IRQ_STATE(r10, r11)
1086 addi r3,r1,STACK_FRAME_OVERHEAD
1087 bl altivec_unavailable_tm
1088 b ret_from_except
1089 #endif
1090 1:
1091 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1092 #endif
1093 bl save_nvgprs
1094 RECONCILE_IRQ_STATE(r10, r11)
1095 addi r3,r1,STACK_FRAME_OVERHEAD
1096 bl altivec_unavailable_exception
1097 b ret_from_except
1098
1099 .align 7
1100 .globl vsx_unavailable_common
1101 vsx_unavailable_common:
1102 EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
1103 #ifdef CONFIG_VSX
1104 BEGIN_FTR_SECTION
1105 beq 1f
1106 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1107 BEGIN_FTR_SECTION_NESTED(69)
1108 /* Test if 2 TM state bits are zero. If non-zero (i.e. userspace was
1109 * in a transaction), go do TM stuff
1110 */
1111 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1112 bne- 2f
1113 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1114 #endif
1115 b load_up_vsx
1116 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1117 2: /* User process was in a transaction */
1118 bl save_nvgprs
1119 RECONCILE_IRQ_STATE(r10, r11)
1120 addi r3,r1,STACK_FRAME_OVERHEAD
1121 bl vsx_unavailable_tm
1122 b ret_from_except
1123 #endif
1124 1:
1125 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1126 #endif
1127 bl save_nvgprs
1128 RECONCILE_IRQ_STATE(r10, r11)
1129 addi r3,r1,STACK_FRAME_OVERHEAD
1130 bl vsx_unavailable_exception
1131 b ret_from_except
1132
1133 /* Equivalents to the above handlers for relocation-on interrupt vectors */
1134 STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
1135 MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
1136 MASKABLE_RELON_EXCEPTION_HV_OOL(0xea0, h_virt_irq)
1137
1138 STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
1139 STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
1140 STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
1141 STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
1142 STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
1143
1144 /*
1145 * The __end_interrupts marker must be past the out-of-line (OOL)
1146 * handlers, so that they are copied to real address 0x100 when running
1147 * a relocatable kernel. This ensures they can be reached from the short
1148 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
1149 * directly, without using LOAD_HANDLER().
1150 */
1151 .align 7
1152 .globl __end_interrupts
1153 __end_interrupts:
1154
1155 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1156 /*
1157 * Data area reserved for FWNMI option.
1158 * This address (0x7000) is fixed by the RPA.
1159 */
1160 . = 0x7000
1161 .globl fwnmi_data_area
1162 fwnmi_data_area:
1163
1164 /* pseries and powernv need to keep the whole page from
1165 * 0x7000 to 0x8000 free for use by the firmware
1166 */
1167 . = 0x8000
1168 #endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1169
1170 STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception)
1171 STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception)
1172
1173 #ifdef CONFIG_CBE_RAS
1174 STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception)
1175 STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception)
1176 STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception)
1177 #endif /* CONFIG_CBE_RAS */
1178
1179 .globl hmi_exception_early
1180 hmi_exception_early:
1181 EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, 0xe62)
1182 mr r10,r1 /* Save r1 */
1183 ld r1,PACAEMERGSP(r13) /* Use emergency stack */
1184 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
1185 std r9,_CCR(r1) /* save CR in stackframe */
1186 mfspr r11,SPRN_HSRR0 /* Save HSRR0 */
1187 std r11,_NIP(r1) /* save HSRR0 in stackframe */
1188 mfspr r12,SPRN_HSRR1 /* Save SRR1 */
1189 std r12,_MSR(r1) /* save SRR1 in stackframe */
1190 std r10,0(r1) /* make stack chain pointer */
1191 std r0,GPR0(r1) /* save r0 in stackframe */
1192 std r10,GPR1(r1) /* save r1 in stackframe */
1193 EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
1194 EXCEPTION_PROLOG_COMMON_3(0xe60)
1195 addi r3,r1,STACK_FRAME_OVERHEAD
1196 bl hmi_exception_realmode
1197 /* Windup the stack. */
1198 /* Move original HSRR0 and HSRR1 into the respective regs */
1199 ld r9,_MSR(r1)
1200 mtspr SPRN_HSRR1,r9
1201 ld r3,_NIP(r1)
1202 mtspr SPRN_HSRR0,r3
1203 ld r9,_CTR(r1)
1204 mtctr r9
1205 ld r9,_XER(r1)
1206 mtxer r9
1207 ld r9,_LINK(r1)
1208 mtlr r9
1209 REST_GPR(0, r1)
1210 REST_8GPRS(2, r1)
1211 REST_GPR(10, r1)
1212 ld r11,_CCR(r1)
1213 mtcr r11
1214 REST_GPR(11, r1)
1215 REST_2GPRS(12, r1)
1216 /* restore original r1. */
1217 ld r1,GPR1(r1)
1218
1219 /*
1220 * Go to virtual mode and pull the HMI event information from
1221 * firmware.
1222 */
1223 .globl hmi_exception_after_realmode
1224 hmi_exception_after_realmode:
1225 SET_SCRATCH0(r13)
1226 EXCEPTION_PROLOG_0(PACA_EXGEN)
1227 b hmi_exception_hv
1228
1229
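/*
 * MACHINE_CHECK_HANDLER_WINDUP unwinds the frame built on the MC
 * emergency stack: it clears MSR_RI, reloads SRR0/SRR1 and the GPRs
 * from the frame, decrements paca->in_mce and restores the original
 * r1, leaving the caller free to rfid or branch elsewhere.
 */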
1230 #define MACHINE_CHECK_HANDLER_WINDUP \
1231 /* Clear MSR_RI before setting SRR0 and SRR1. */\
1232 li r0,MSR_RI; \
1233 mfmsr r9; /* get MSR value */ \
1234 andc r9,r9,r0; \
1235 mtmsrd r9,1; /* Clear MSR_RI */ \
1236 /* Move original SRR0 and SRR1 into the respective regs */ \
1237 ld r9,_MSR(r1); \
1238 mtspr SPRN_SRR1,r9; \
1239 ld r3,_NIP(r1); \
1240 mtspr SPRN_SRR0,r3; \
1241 ld r9,_CTR(r1); \
1242 mtctr r9; \
1243 ld r9,_XER(r1); \
1244 mtxer r9; \
1245 ld r9,_LINK(r1); \
1246 mtlr r9; \
1247 REST_GPR(0, r1); \
1248 REST_8GPRS(2, r1); \
1249 REST_GPR(10, r1); \
1250 ld r11,_CCR(r1); \
1251 mtcr r11; \
1252 /* Decrement paca->in_mce. */ \
1253 lhz r12,PACA_IN_MCE(r13); \
1254 subi r12,r12,1; \
1255 sth r12,PACA_IN_MCE(r13); \
1256 REST_GPR(11, r1); \
1257 REST_2GPRS(12, r1); \
1258 /* restore original r1. */ \
1259 ld r1,GPR1(r1)
1260
1261 /*
1262 * Handle machine check early in real mode. We come here with
1263 * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
1264 */
1265 .align 7
1266 .globl machine_check_handle_early
1267 machine_check_handle_early:
1268 std r0,GPR0(r1) /* Save r0 */
1269 EXCEPTION_PROLOG_COMMON_3(0x200)
1270 bl save_nvgprs
1271 addi r3,r1,STACK_FRAME_OVERHEAD
1272 bl machine_check_early
1273 std r3,RESULT(r1) /* Save result */
1274 ld r12,_MSR(r1)
1275 #ifdef CONFIG_PPC_P7_NAP
1276 /*
1277 * Check if thread was in power saving mode. We come here when any
1278 * of the following is true:
1279 * a. thread wasn't in power saving mode
1280 * b. thread was in power saving mode with no state loss,
1281 * supervisor state loss or hypervisor state loss.
1282 *
1283 * Go back to nap/sleep/winkle mode again if (b) is true.
1284 */
1285 rlwinm. r11,r12,47-31,30,31 /* Was it in power saving mode? */
1286 beq 4f /* No, it wasn't */
1287 /* Thread was in power saving mode. Go back to nap again. */
1288 cmpwi r11,2
1289 blt 3f
1290 /* Supervisor/Hypervisor state loss */
1291 li r0,1
1292 stb r0,PACA_NAPSTATELOST(r13)
1293 3: bl machine_check_queue_event
1294 MACHINE_CHECK_HANDLER_WINDUP
1295 GET_PACA(r13)
1296 ld r1,PACAR1(r13)
1297 /*
1298 * Check what idle state this CPU was in and go back to the same mode
1299 * again.
1300 */
1301 lbz r3,PACA_THREAD_IDLE_STATE(r13)
1302 cmpwi r3,PNV_THREAD_NAP
1303 bgt 10f
1304 IDLE_STATE_ENTER_SEQ(PPC_NAP)
1305 /* No return */
1306 10:
1307 cmpwi r3,PNV_THREAD_SLEEP
1308 bgt 2f
1309 IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
1310 /* No return */
1311
1312 2:
1313 /*
1314 * Go back to winkle. Please note that this thread was woken up in
1315 * machine check from winkle and has not restored the per-subcore
1316 * state. Hence, before going back to winkle, set the last bit of HSPRG0
1317 * to 1. This will make sure that if this thread gets woken up
1318 * again at reset vector 0x100 then it will get a chance to restore
1319 * the subcore state.
1320 */
1321 ori r13,r13,1
1322 SET_PACA(r13)
1323 IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
1324 /* No return */
1325 4:
1326 #endif
1327 /*
1328 * Check if we are coming from hypervisor userspace. If yes then we
1329 * continue in host kernel in V mode to deliver the MC event.
1330 */
1331 rldicl. r11,r12,4,63 /* See if MC hit while in HV mode. */
1332 beq 5f
1333 andi. r11,r12,MSR_PR /* See if coming from user. */
1334 bne 9f /* continue in V mode if we are. */
1335
1336 5:
1337 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1338 /*
1339 * We are coming from kernel context. Check if we are coming from
1340 * the guest. If so, then we can continue. We will fall through
1341 * do_kvm_200->kvmppc_interrupt to deliver the MC event to the guest.
1342 */
1343 lbz r11,HSTATE_IN_GUEST(r13)
1344 cmpwi r11,0 /* Check if coming from guest */
1345 bne 9f /* continue if we are. */
1346 #endif
1347 /*
1348 * At this point we are not sure what context we came from.
1349 * Queue up the MCE event and return from the interrupt.
1350 * But before that, check if this is an unrecoverable exception.
1351 * If so, stay on the emergency stack and panic.
1352 */
1353 andi. r11,r12,MSR_RI
1354 bne 2f
1355 1: mfspr r11,SPRN_SRR0
1356 ld r10,PACAKBASE(r13)
1357 LOAD_HANDLER(r10,unrecover_mce)
1358 mtspr SPRN_SRR0,r10
1359 ld r10,PACAKMSR(r13)
1360 /*
1361 * We are going down. But there is a chance that we might get hit by
1362 * another MCE during the panic path and run into an unstable state
1363 * with no way out. Hence, turn the ME bit off while going down, so that
1364 * when another MCE is hit during the panic path, the system will
1365 * checkstop and the hypervisor will get restarted cleanly by the SP.
1366 */
1367 li r3,MSR_ME
1368 andc r10,r10,r3 /* Turn off MSR_ME */
1369 mtspr SPRN_SRR1,r10
1370 rfid
1371 b .
1372 2:
1373 /*
1374 * Check if we have successfully handled/recovered from error, if not
1375 * then stay on emergency stack and panic.
1376 */
1377 ld r3,RESULT(r1) /* Load result */
1378 cmpdi r3,0 /* see if we handled MCE successfully */
1379
1380 beq 1b /* if !handled then panic */
1381 /*
1382 * Return from MC interrupt.
1383 * Queue up the MCE event so that we can log it later, while
1384 * returning from kernel or opal call.
1385 */
1386 bl machine_check_queue_event
1387 MACHINE_CHECK_HANDLER_WINDUP
1388 rfid
1389 9:
1390 /* Deliver the machine check to host kernel in V mode. */
1391 MACHINE_CHECK_HANDLER_WINDUP
1392 b machine_check_pSeries
1393
1394 unrecover_mce:
1395 /* Invoke machine_check_exception to print MCE event and panic. */
1396 addi r3,r1,STACK_FRAME_OVERHEAD
1397 bl machine_check_exception
1398 /*
1399 * We will not reach here. Even if we did, there is no way out. Call
1400 * unrecoverable_exception and die.
1401 */
1402 1: addi r3,r1,STACK_FRAME_OVERHEAD
1403 bl unrecoverable_exception
1404 b 1b
1405 /*
1406 * r13 points to the PACA, r9 contains the saved CR,
1407 * r12 contains the saved SRR1, SRR0 is still ready for return
1408 * r3 has the faulting address
1409 * r9 - r13 are saved in paca->exslb.
1410 * r3 is saved in paca->slb_r3
1411 * We assume we aren't going to take any exceptions during this procedure.
1412 */
1413 slb_miss_realmode:
1414 mflr r10
1415 #ifdef CONFIG_RELOCATABLE
1416 mtctr r11
1417 #endif
1418
1419 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1420 std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
1421
1422 #ifdef CONFIG_PPC_STD_MMU_64
1423 BEGIN_MMU_FTR_SECTION
1424 bl slb_allocate_realmode
1425 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
1426 #endif
1427 /* All done -- return from exception. */
1428
1429 ld r10,PACA_EXSLB+EX_LR(r13)
1430 ld r3,PACA_EXSLB+EX_R3(r13)
1431 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1432
1433 mtlr r10
1434 andi. r10,r12,MSR_RI /* check for unrecoverable exception */
1435 BEGIN_MMU_FTR_SECTION
1436 beq- 2f
1437 FTR_SECTION_ELSE
1438 b 2f
1439 ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1440
1441 .machine push
1442 .machine "power4"
1443 mtcrf 0x80,r9
1444 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
1445 .machine pop
1446
1447 RESTORE_PPR_PACA(PACA_EXSLB, r9)
1448 ld r9,PACA_EXSLB+EX_R9(r13)
1449 ld r10,PACA_EXSLB+EX_R10(r13)
1450 ld r11,PACA_EXSLB+EX_R11(r13)
1451 ld r12,PACA_EXSLB+EX_R12(r13)
1452 ld r13,PACA_EXSLB+EX_R13(r13)
1453 rfid
1454 b . /* prevent speculative execution */
1455
1456 2: mfspr r11,SPRN_SRR0
1457 ld r10,PACAKBASE(r13)
1458 LOAD_HANDLER(r10,unrecov_slb)
1459 mtspr SPRN_SRR0,r10
1460 ld r10,PACAKMSR(r13)
1461 mtspr SPRN_SRR1,r10
1462 rfid
1463 b .
1464
1465 unrecov_slb:
1466 EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1467 RECONCILE_IRQ_STATE(r10, r11)
1468 bl save_nvgprs
1469 1: addi r3,r1,STACK_FRAME_OVERHEAD
1470 bl unrecoverable_exception
1471 b 1b
1472
1473
1474 #ifdef CONFIG_PPC_970_NAP
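/*
 * Reached when an exception interrupts a napping PPC970 (via the
 * FINISH_NAP check): r9 is expected to hold the thread_info local
 * flags, r10 the _TLF_NAPPING bit and r11 the thread_info pointer.
 * Clearing the flag and copying the saved LR into _NIP makes the
 * interrupted idle code return as if from a blr.
 */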
1475 power4_fixup_nap:
1476 andc r9,r9,r10
1477 std r9,TI_LOCAL_FLAGS(r11)
1478 ld r10,_LINK(r1) /* make idle task do the */
1479 std r10,_NIP(r1) /* equivalent of a blr */
1480 blr
1481 #endif
1482
1483 /*
1484 * Hash table stuff
1485 */
1486 .align 7
1487 do_hash_page:
1488 #ifdef CONFIG_PPC_STD_MMU_64
1489 andis. r0,r4,0xa410 /* weird error? */
1490 bne- handle_page_fault /* if not, try to insert a HPTE */
1491 andis. r0,r4,DSISR_DABRMATCH@h
1492 bne- handle_dabr_fault
1493 CURRENT_THREAD_INFO(r11, r1)
1494 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
1495 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
1496 bne 77f /* then don't call hash_page now */
1497
1498 /*
1499 * r3 contains the faulting address
1500 * r4 msr
1501 * r5 contains the trap number
1502 * r6 contains dsisr
1503 *
1504 * at return r3 = 0 for success, 1 for page fault, negative for error
1505 */
1506 mr r4,r12
1507 ld r6,_DSISR(r1)
1508 bl __hash_page /* build HPTE if possible */
1509 cmpdi r3,0 /* see if __hash_page succeeded */
1510
1511 /* Success */
1512 beq fast_exc_return_irq /* Return from exception on success */
1513
1514 /* Error */
1515 blt- 13f
1516 #endif /* CONFIG_PPC_STD_MMU_64 */
1517
1518 /* Here we have a page fault that hash_page can't handle. */
1519 handle_page_fault:
1520 11: ld r4,_DAR(r1)
1521 ld r5,_DSISR(r1)
1522 addi r3,r1,STACK_FRAME_OVERHEAD
1523 bl do_page_fault
1524 cmpdi r3,0
1525 beq+ 12f
1526 bl save_nvgprs
1527 mr r5,r3
1528 addi r3,r1,STACK_FRAME_OVERHEAD
1529 lwz r4,_DAR(r1)
1530 bl bad_page_fault
1531 b ret_from_except
1532
1533 /* We have a data breakpoint exception - handle it */
1534 handle_dabr_fault:
1535 bl save_nvgprs
1536 ld r4,_DAR(r1)
1537 ld r5,_DSISR(r1)
1538 addi r3,r1,STACK_FRAME_OVERHEAD
1539 bl do_break
1540 12: b ret_from_except_lite
1541
1542
1543 #ifdef CONFIG_PPC_STD_MMU_64
1544 /* We have a page fault that hash_page could handle but HV refused
1545 * the PTE insertion
1546 */
1547 13: bl save_nvgprs
1548 mr r5,r3
1549 addi r3,r1,STACK_FRAME_OVERHEAD
1550 ld r4,_DAR(r1)
1551 bl low_hash_fault
1552 b ret_from_except
1553 #endif
1554
1555 /*
1556 * We come here as a result of a DSI at a point where we don't want
1557 * to call hash_page, such as when we are accessing memory (possibly
1558 * user memory) inside a PMU interrupt that occurred while interrupts
1559 * were soft-disabled. We want to invoke the exception handler for
1560 * the access, or panic if there isn't a handler.
1561 */
1562 77: bl save_nvgprs
1563 mr r4,r3
1564 addi r3,r1,STACK_FRAME_OVERHEAD
1565 li r5,SIGSEGV
1566 bl bad_page_fault
1567 b ret_from_except
1568
1569 /*
1570 * Here we have detected that the kernel stack pointer is bad.
1571 * R9 contains the saved CR, r13 points to the paca,
1572 * r10 contains the (bad) kernel stack pointer,
1573 * r11 and r12 contain the saved SRR0 and SRR1.
1574 * We switch to using an emergency stack, save the registers there,
1575 * and call kernel_bad_stack(), which panics.
1576 */
1577 bad_stack:
1578 ld r1,PACAEMERGSP(r13)
1579 subi r1,r1,64+INT_FRAME_SIZE
1580 std r9,_CCR(r1)
1581 std r10,GPR1(r1)
1582 std r11,_NIP(r1)
1583 std r12,_MSR(r1)
1584 mfspr r11,SPRN_DAR
1585 mfspr r12,SPRN_DSISR
1586 std r11,_DAR(r1)
1587 std r12,_DSISR(r1)
1588 mflr r10
1589 mfctr r11
1590 mfxer r12
1591 std r10,_LINK(r1)
1592 std r11,_CTR(r1)
1593 std r12,_XER(r1)
1594 SAVE_GPR(0,r1)
1595 SAVE_GPR(2,r1)
1596 ld r10,EX_R3(r3)
1597 std r10,GPR3(r1)
1598 SAVE_GPR(4,r1)
1599 SAVE_4GPRS(5,r1)
1600 ld r9,EX_R9(r3)
1601 ld r10,EX_R10(r3)
1602 SAVE_2GPRS(9,r1)
1603 ld r9,EX_R11(r3)
1604 ld r10,EX_R12(r3)
1605 ld r11,EX_R13(r3)
1606 std r9,GPR11(r1)
1607 std r10,GPR12(r1)
1608 std r11,GPR13(r1)
1609 BEGIN_FTR_SECTION
1610 ld r10,EX_CFAR(r3)
1611 std r10,ORIG_GPR3(r1)
1612 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1613 SAVE_8GPRS(14,r1)
1614 SAVE_10GPRS(22,r1)
1615 lhz r12,PACA_TRAP_SAVE(r13)
1616 std r12,_TRAP(r1)
1617 addi r11,r1,INT_FRAME_SIZE
1618 std r11,0(r1)
1619 li r12,0
1620 std r12,0(r11)
1621 ld r2,PACATOC(r13)
1622 ld r11,exception_marker@toc(r2)
1623 std r12,RESULT(r1)
1624 std r11,STACK_FRAME_OVERHEAD-16(r1)
1625 1: addi r3,r1,STACK_FRAME_OVERHEAD
1626 bl kernel_bad_stack
1627 b 1b