/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/linkage.h>
#include <linux/const.h>
#include <asm/unified.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/vfpmacros.h>
#include "interrupts_head.S"

        .text

__kvm_hyp_code_start:
        .globl __kvm_hyp_code_start

/********************************************************************
 * Flush per-VMID TLBs
 *
 * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 *
 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
 * inside the inner-shareable domain (which is the case for all v7
 * implementations). If we come across a non-IS SMP implementation, we'll
 * have to use an IPI based mechanism. Until then, we stick to the simple
 * hardware assisted version.
 *
 * As v7 does not support flushing per IPA, just nuke the whole TLB
 * instead, ignoring the ipa value.
 */
ENTRY(__kvm_tlb_flush_vmid_ipa)
        push    {r2, r3}

        dsb     ishst
        add     r0, r0, #KVM_VTTBR
        ldrd    r2, r3, [r0]
        mcrr    p15, 6, rr_lo_hi(r2, r3), c2    @ Write VTTBR
        isb
        mcr     p15, 0, r0, c8, c3, 0           @ TLBIALLIS (rt ignored)
        dsb     ish
        isb
        mov     r2, #0
        mov     r3, #0
        mcrr    p15, 6, r2, r3, c2              @ Back to VMID #0
        isb                                     @ Not necessary if followed by eret

        pop     {r2, r3}
        bx      lr
ENDPROC(__kvm_tlb_flush_vmid_ipa)
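The pair of words loaded from KVM_VTTBR above forms the 64-bit VTTBR that selects the guest's stage-2 tables and its VMID (bits [55:48]). As a rough, illustrative C sketch of how such a value is composed on the host side (the helper name is invented and the base-address mask is simplified; the kernel uses its own VTTBR_BADDR_MASK):

    /* Illustration only: compose a VTTBR value from a stage-2 pgd and a VMID.
     * VMID occupies VTTBR[55:48]; the low bits hold the table base address. */
    static inline u64 make_vttbr(phys_addr_t pgd_phys, u8 vmid)
    {
            return ((u64)pgd_phys & ~0xfffULL) | ((u64)vmid << 48);
    }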

/**
 * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
 *
 * Reuses __kvm_tlb_flush_vmid_ipa() for ARMv7, without passing address
 * parameter
 */

ENTRY(__kvm_tlb_flush_vmid)
        b       __kvm_tlb_flush_vmid_ipa
ENDPROC(__kvm_tlb_flush_vmid)

/********************************************************************
 * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
 * domain, for all VMIDs
 *
 * void __kvm_flush_vm_context(void);
 */
ENTRY(__kvm_flush_vm_context)
        mov     r0, #0                  @ rn parameter for c15 flushes is SBZ

        /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
        mcr     p15, 4, r0, c8, c3, 4
        /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
        mcr     p15, 0, r0, c7, c1, 0
        dsb     ish
        isb                             @ Not necessary if followed by eret

        bx      lr
ENDPROC(__kvm_flush_vm_context)


/********************************************************************
 * Hypervisor world-switch code
 *
 *
 * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 */
ENTRY(__kvm_vcpu_run)
        @ Save the vcpu pointer
        mcr     p15, 4, vcpu, c13, c0, 2        @ HTPIDR

        save_host_regs

        restore_vgic_state
        restore_timer_state

        @ Store hardware CP15 state and load guest state
        read_cp15_state store_to_vcpu = 0
        write_cp15_state read_from_vcpu = 1

        @ If the host kernel has not been configured with VFPv3 support,
        @ then it is safer if we deny guests from using it as well.
#ifdef CONFIG_VFPv3
        @ Set FPEXC_EN so the guest doesn't trap floating point instructions
        VFPFMRX r2, FPEXC               @ VMRS
        push    {r2}
        orr     r2, r2, #FPEXC_EN
        VFPFMXR FPEXC, r2               @ VMSR
#endif

        @ Configure Hyp-role
        configure_hyp_role vmentry

        @ Trap coprocessor CRx accesses
        set_hstr vmentry
        set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
        set_hdcr vmentry

        @ Write configured ID register into MIDR alias
        ldr     r1, [vcpu, #VCPU_MIDR]
        mcr     p15, 4, r1, c0, c0, 0

        @ Write guest view of MPIDR into VMPIDR
        ldr     r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
        mcr     p15, 4, r1, c0, c0, 5

        @ Set up guest memory translation
        ldr     r1, [vcpu, #VCPU_KVM]
        add     r1, r1, #KVM_VTTBR
        ldrd    r2, r3, [r1]
        mcrr    p15, 6, rr_lo_hi(r2, r3), c2    @ Write VTTBR

        @ We're all done, just restore the GPRs and go to the guest
        restore_guest_regs
        clrex                           @ Clear exclusive monitor
        eret

__kvm_vcpu_return:
        /*
         * return convention:
         * guest r0, r1, r2 saved on the stack
         * r0: vcpu pointer
         * r1: exception code
         */
        save_guest_regs

        @ Set VMID == 0
        mov     r2, #0
        mov     r3, #0
        mcrr    p15, 6, r2, r3, c2      @ Write VTTBR

        @ Don't trap coprocessor accesses for host kernel
        set_hstr vmexit
        set_hdcr vmexit
        set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore

#ifdef CONFIG_VFPv3
        @ Switch VFP/NEON hardware state to the host's
        add     r7, vcpu, #VCPU_VFP_GUEST
        store_vfp_state r7
        add     r7, vcpu, #VCPU_VFP_HOST
        ldr     r7, [r7]
        restore_vfp_state r7

after_vfp_restore:
        @ Restore FPEXC_EN which we clobbered on entry
        pop     {r2}
        VFPFMXR FPEXC, r2
#else
after_vfp_restore:
#endif

        @ Reset Hyp-role
        configure_hyp_role vmexit

        @ Let host read hardware MIDR
        mrc     p15, 0, r2, c0, c0, 0
        mcr     p15, 4, r2, c0, c0, 0

        @ Back to hardware MPIDR
        mrc     p15, 0, r2, c0, c0, 5
        mcr     p15, 4, r2, c0, c0, 5

        @ Store guest CP15 state and restore host state
        read_cp15_state store_to_vcpu = 1
        write_cp15_state read_from_vcpu = 0

        save_timer_state
        save_vgic_state

        restore_host_regs
        clrex                           @ Clear exclusive monitor
#ifndef CONFIG_CPU_ENDIAN_BE8
        mov     r0, r1                  @ Return the return code
        mov     r1, #0                  @ Clear upper bits in return value
#else
        @ r1 already has return code
        mov     r0, #0                  @ Clear upper bits in return value
#endif /* CONFIG_CPU_ENDIAN_BE8 */
        bx      lr                      @ return to IOCTL

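The exception code moved into r0 just before the final `bx lr` above is what the host's run loop receives as the return value of __kvm_vcpu_run(): one of the ARM_EXCEPTION_* codes set on the exit paths below. A hedged sketch of the caller side (the dispatch shown is illustrative, not a quote of arch/arm/kvm/arm.c):

    /* Sketch: enter the guest via the HVC-based call mechanism described in
     * the next section, then act on the exception code that came back. */
    int exit_code = kvm_call_hyp(__kvm_vcpu_run, vcpu);

    switch (exit_code) {
    case ARM_EXCEPTION_IRQ:         /* a host interrupt ended the guest run */
            break;                  /* handle the IRQ, then loop and re-enter */
    case ARM_EXCEPTION_HVC:         /* guest trap: HSR/HxFAR/HPFAR were saved */
            break;                  /* decode and emulate, then re-enter */
    default:                        /* undef/pabt/dabt taken in Hyp mode */
            break;                  /* treated as an error by the host */
    }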
/********************************************************************
 * Call function in Hyp mode
 *
 *
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C-way and care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed). The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
 * passed in r0 and r1.
 *
 * A function pointer with a value of 0xffffffff has a special meaning,
 * and is used to implement __hyp_get_vectors in the same way as in
 * arch/arm/kernel/hyp_stub.S.
 *
 * The calling convention follows the standard AAPCS:
 *   r0 - r3: caller save
 *   r12:     caller save
 *   rest:    callee save
 */
ENTRY(kvm_call_hyp)
        hvc     #0
        bx      lr

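A minimal usage sketch of the convention just described, as seen from the host side (the wrapper names are invented for illustration; the Hyp functions are the ones defined earlier in this file, and all arguments fit in registers as required):

    /* Sketch only: first argument is the Hyp function pointer, followed by
     * up to three register-sized arguments for that function. */
    static void stage2_flush_one_ipa(struct kvm *kvm, phys_addr_t ipa)
    {
            kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
    }

    static void flush_all_guest_contexts(void)
    {
            kvm_call_hyp(__kvm_flush_vm_context);
    }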
/********************************************************************
 * Hypervisor exception vector and handlers
 *
 *
 * The KVM/ARM Hypervisor ABI is defined as follows:
 *
 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
 * instruction is issued since all traps are disabled when running the host
 * kernel as per the Hyp-mode initialization at boot time.
 *
 * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
 * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
 * instructions are called from within Hyp-mode.
 *
 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
 *    Switching to Hyp mode is done through a simple HVC #0 instruction. The
 *    exception vector code will check that the HVC comes from VMID==0 and if
 *    so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
 *    - r0 contains a pointer to a HYP function
 *    - r1, r2, and r3 contain arguments to the above function.
 *    - The HYP function will be called with its arguments in r0, r1 and r2.
 *      On HYP function return, we return directly to SVC.
 *
 * Note that the above is used to execute code in Hyp-mode from a host-kernel
 * point of view, and is a different concept from performing a world-switch and
 * executing guest code in SVC mode (with a VMID != 0).
 */

/* Handle undef, svc, pabt, or dabt by crashing with a user notice */
.macro bad_exception exception_code, panic_str
        push    {r0-r2}
        mrrc    p15, 6, r0, r1, c2      @ Read VTTBR
        lsr     r1, r1, #16
        ands    r1, r1, #0xff
        beq     99f

        load_vcpu                       @ Load VCPU pointer
        .if \exception_code == ARM_EXCEPTION_DATA_ABORT
        mrc     p15, 4, r2, c5, c2, 0   @ HSR
        mrc     p15, 4, r1, c6, c0, 0   @ HDFAR
        str     r2, [vcpu, #VCPU_HSR]
        str     r1, [vcpu, #VCPU_HxFAR]
        .endif
        .if \exception_code == ARM_EXCEPTION_PREF_ABORT
        mrc     p15, 4, r2, c5, c2, 0   @ HSR
        mrc     p15, 4, r1, c6, c0, 2   @ HIFAR
        str     r2, [vcpu, #VCPU_HSR]
        str     r1, [vcpu, #VCPU_HxFAR]
        .endif
        mov     r1, #\exception_code
        b       __kvm_vcpu_return

        @ We were in the host already. Let's craft a panic-ing return to SVC.
99:     mrs     r2, cpsr
        bic     r2, r2, #MODE_MASK
        orr     r2, r2, #SVC_MODE
THUMB(  orr     r2, r2, #PSR_T_BIT      )
        msr     spsr_cxsf, r2
        mrs     r1, ELR_hyp
        ldr     r2, =panic
        msr     ELR_hyp, r2
        ldr     r0, =\panic_str
        clrex                           @ Clear exclusive monitor
        eret
.endm
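The VTTBR read at the top of the macro only serves to decide whether a guest was running when the exception hit: the VMID lives in bits [55:48] of VTTBR, i.e. bits [23:16] of its upper word, which is exactly what the `lsr #16` / `ands #0xff` pair extracts. The same test, restated in C purely as an illustration:

    /* Illustration of the macro's "were we running a guest?" test. */
    static inline bool vttbr_vmid_is_nonzero(u64 vttbr)
    {
            return ((vttbr >> 48) & 0xff) != 0;     /* VMID 0 is reserved for the host */
    }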

        .text

        .align 5
__kvm_hyp_vector:
        .globl __kvm_hyp_vector

        @ Hyp-mode exception vector
        W(b)    hyp_reset
        W(b)    hyp_undef
        W(b)    hyp_svc
        W(b)    hyp_pabt
        W(b)    hyp_dabt
        W(b)    hyp_hvc
        W(b)    hyp_irq
        W(b)    hyp_fiq

        .align
hyp_reset:
        b       hyp_reset

        .align
hyp_undef:
        bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str

        .align
hyp_svc:
        bad_exception ARM_EXCEPTION_HVC, svc_die_str

        .align
hyp_pabt:
        bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str

        .align
hyp_dabt:
        bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str

        .align
hyp_hvc:
        /*
         * Getting here is either because of a trap from a guest or from calling
         * HVC from the host kernel, which means "switch to Hyp mode".
         */
        push    {r0, r1, r2}

        @ Check syndrome register
        mrc     p15, 4, r1, c5, c2, 0   @ HSR
        lsr     r0, r1, #HSR_EC_SHIFT
        cmp     r0, #HSR_EC_HVC
        bne     guest_trap              @ Not HVC instr.

        /*
         * Let's check if the HVC came from VMID 0 and allow simple
         * switch to Hyp mode
         */
        mrrc    p15, 6, r0, r2, c2
        lsr     r2, r2, #16
        and     r2, r2, #0xff
        cmp     r2, #0
        bne     guest_trap              @ Guest called HVC

        /*
         * Getting here means host called HVC, we shift parameters and branch
         * to Hyp function.
         */
        pop     {r0, r1, r2}

        /* Check for __hyp_get_vectors */
        cmp     r0, #-1
        mrceq   p15, 4, r0, c12, c0, 0  @ get HVBAR
        beq     1f

        push    {lr}
        mrs     lr, SPSR
        push    {lr}

        mov     lr, r0
        mov     r0, r1
        mov     r1, r2
        mov     r2, r3

THUMB(  orr     lr, #1)
        blx     lr                      @ Call the HYP function

        pop     {lr}
        msr     SPSR_csxf, lr
        pop     {lr}
1:      eret

guest_trap:
        load_vcpu                       @ Load VCPU pointer to r0
        str     r1, [vcpu, #VCPU_HSR]

        @ Check if we need the fault information
        lsr     r1, r1, #HSR_EC_SHIFT
#ifdef CONFIG_VFPv3
        cmp     r1, #HSR_EC_CP_0_13
        beq     switch_to_guest_vfp
#endif
        cmp     r1, #HSR_EC_IABT
        mrceq   p15, 4, r2, c6, c0, 2   @ HIFAR
        beq     2f
        cmp     r1, #HSR_EC_DABT
        bne     1f
        mrc     p15, 4, r2, c6, c0, 0   @ HDFAR

2:      str     r2, [vcpu, #VCPU_HxFAR]

        /*
         * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
         *
         * Abort on the stage 2 translation for a memory access from a
         * Non-secure PL1 or PL0 mode:
         *
         * For any Access flag fault or Translation fault, and also for any
         * Permission fault on the stage 2 translation of a memory access
         * made as part of a translation table walk for a stage 1 translation,
         * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
         * is UNKNOWN.
         */

        /* Check for permission fault, and S1PTW */
        mrc     p15, 4, r1, c5, c2, 0   @ HSR
        and     r0, r1, #HSR_FSC_TYPE
        cmp     r0, #FSC_PERM
        tsteq   r1, #(1 << 7)           @ S1PTW
        mrcne   p15, 4, r2, c6, c0, 4   @ HPFAR
        bne     3f

        /* Preserve PAR */
        mrrc    p15, 0, r0, r1, c7      @ PAR
        push    {r0, r1}

        /* Resolve IPA using the xFAR */
        mcr     p15, 0, r2, c7, c8, 0   @ ATS1CPR
        isb
        mrrc    p15, 0, r0, r1, c7      @ PAR
        tst     r0, #1
        bne     4f                      @ Failed translation
        ubfx    r2, r0, #12, #20
        lsl     r2, r2, #4
        orr     r2, r2, r1, lsl #24

        /* Restore PAR */
        pop     {r0, r1}
        mcrr    p15, 0, r0, r1, c7      @ PAR

3:      load_vcpu                       @ Load VCPU pointer to r0
        str     r2, [r0, #VCPU_HPFAR]

1:      mov     r1, #ARM_EXCEPTION_HVC
        b       __kvm_vcpu_return

4:      pop     {r0, r1}                @ Failed translation, return to guest
        mcrr    p15, 0, r0, r1, c7      @ PAR
        clrex
        pop     {r0, r1, r2}
        eret

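When the HPFAR cannot be trusted, the ATS1CPR sequence above re-translates the faulting address and rebuilds an HPFAR-format value (IPA[39:12] in bits [31:4]) from the two words of the 64-bit PAR. The `ubfx`/`lsl`/`orr` dance, restated in C for clarity (the helper name is invented; it assumes the long-descriptor PAR layout):

    /* par_lo/par_hi are the low and high words of the 64-bit PAR after ATS1CPR:
     * PA[31:12] sits in par_lo[31:12] and PA[39:32] in par_hi[7:0]. The result
     * mirrors HPFAR, which reports IPA[39:12] in bits [31:4]. */
    static inline u32 par_to_hpfar(u32 par_lo, u32 par_hi)
    {
            return (((par_lo >> 12) & 0xfffff) << 4) | (par_hi << 24);
    }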
/*
 * If VFPv3 support is not available, then we will not switch the VFP
 * registers; however cp10 and cp11 accesses will still trap and fallback
 * to the regular coprocessor emulation code, which currently will
 * inject an undefined exception to the guest.
 */
#ifdef CONFIG_VFPv3
switch_to_guest_vfp:
        push    {r3-r7}

        @ NEON/VFP used. Turn on VFP access.
        set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))

        @ Switch VFP/NEON hardware state to the guest's
        add     r7, r0, #VCPU_VFP_HOST
        ldr     r7, [r7]
        store_vfp_state r7
        add     r7, r0, #VCPU_VFP_GUEST
        restore_vfp_state r7

        pop     {r3-r7}
        pop     {r0-r2}
        clrex
        eret
#endif

        .align
hyp_irq:
        push    {r0, r1, r2}
        mov     r1, #ARM_EXCEPTION_IRQ
        load_vcpu                       @ Load VCPU pointer to r0
        b       __kvm_vcpu_return

        .align
hyp_fiq:
        b       hyp_fiq

        .ltorg

__kvm_hyp_code_end:
        .globl __kvm_hyp_code_end

        .section ".rodata"

und_die_str:
        .ascii  "unexpected undefined exception in Hyp mode at: %#08x\n"
pabt_die_str:
        .ascii  "unexpected prefetch abort in Hyp mode at: %#08x\n"
dabt_die_str:
        .ascii  "unexpected data abort in Hyp mode at: %#08x\n"
svc_die_str:
        .ascii  "unexpected HVC/SVC trap in Hyp mode at: %#08x\n"