/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Generation of main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 *
 * Copyright (C) 2016 Imagination Technologies Ltd.
 */

#include <linux/kvm_host.h>
#include <asm/msa.h>
#include <asm/setup.h>
#include <asm/uasm.h>

/* Register names */
#define ZERO		0
#define AT		1
#define V0		2
#define V1		3
#define A0		4
#define A1		5

#if _MIPS_SIM == _MIPS_SIM_ABI32
#define T0		8
#define T1		9
#define T2		10
#define T3		11
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */

#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
#define T0		12
#define T1		13
#define T2		14
#define T3		15
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */

#define S0		16
#define S1		17
#define T9		25
#define K0		26
#define K1		27
#define GP		28
#define SP		29
#define RA		31

/* Some CP0 registers */
#define C0_HWRENA	7, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_STATUS	12, 0
#define C0_CAUSE	13, 0
#define C0_EPC		14, 0
#define C0_EBASE	15, 1
#define C0_CONFIG3	16, 3
#define C0_CONFIG5	16, 5
#define C0_DDATA_LO	28, 3
#define C0_ERROREPC	30, 0

#define CALLFRAME_SIZ	32
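
/*
 * Each C0_* define above expands to a "register, select" argument pair,
 * so a call like uasm_i_mfc0(&p, V0, C0_STATUS) emits "mfc0 $2, $12, 0".
 */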

enum label_id {
	label_fpu_1 = 1,
	label_msa_1,
	label_return_to_host,
	label_kernel_asid,
};

UASM_L_LA(_fpu_1)
UASM_L_LA(_msa_1)
UASM_L_LA(_return_to_host)
UASM_L_LA(_kernel_asid)

static void *kvm_mips_build_enter_guest(void *addr);
static void *kvm_mips_build_ret_from_exit(void *addr);
static void *kvm_mips_build_ret_to_guest(void *addr);
static void *kvm_mips_build_ret_to_host(void *addr);

/**
 * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
 * @addr:	Address to start writing code.
 *
 * Assemble the start of the vcpu_run function to run a guest VCPU. The function
 * conforms to the following prototype:
 *
 * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 *
 * The exit from the guest and return to the caller is handled by the code
 * generated by kvm_mips_build_ret_to_host().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_vcpu_run(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/*
	 * A0: run
	 * A1: vcpu
	 */

	/* k0/k1 not being used in host kernel context */
	uasm_i_addiu(&p, K1, SP, -(int)sizeof(struct pt_regs));
	for (i = 16; i < 32; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}
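
	/*
	 * The loop above stores only $16-$23 (s0-s7) and $28-$31 (gp, sp,
	 * s8, ra): the callee-saved set this generated function must
	 * preserve for its C caller. $24-$27 (t8, t9, k0, k1) are skipped.
	 */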

	/* Save hi/lo */
	uasm_i_mflo(&p, V0);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, lo), K1);
	uasm_i_mfhi(&p, V1);
	UASM_i_SW(&p, V1, offsetof(struct pt_regs, hi), K1);

	/* Save host status */
	uasm_i_mfc0(&p, V0, C0_STATUS);
	UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);

	/* Save DDATA_LO, will be used to store pointer to vcpu */
	uasm_i_mfc0(&p, V1, C0_DDATA_LO);
	UASM_i_SW(&p, V1, offsetof(struct pt_regs, cp0_epc), K1);

	/* DDATA_LO has pointer to vcpu */
	uasm_i_mtc0(&p, A1, C0_DDATA_LO);

	/* Offset into vcpu->arch */
	uasm_i_addiu(&p, K1, A1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Save the kernel gp as well */
	UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/*
	 * Setup status register for running the guest in UM, interrupts
	 * are disabled
	 */
	UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);
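
	/*
	 * With EXL set the CPU still executes in kernel mode even though
	 * KSU selects user mode; the later eret clears EXL and drops to
	 * user (guest) mode at EPC. BEV keeps exceptions on the bootstrap
	 * vector while EBASE is being switched below.
	 */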

	/* load up the new EBASE */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
	uasm_i_mtc0(&p, K0, C0_EBASE);

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
	uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE);
	uasm_i_andi(&p, V0, V0, ST0_IM);
	uasm_i_or(&p, K0, K0, V0);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}
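
/*
 * A rough usage sketch (names here are illustrative, not part of this
 * file): the KVM init code is expected to emit the run function and the
 * handlers into a code buffer and flush the icache before calling the
 * result through a function pointer:
 *
 *	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu) = buf;
 *	void *p = kvm_mips_build_vcpu_run(buf);
 *	flush_icache_range((unsigned long)buf, (unsigned long)p);
 *	ret = vcpu_run(vcpu->run, vcpu);
 */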

/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Set Guest EPC */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
	uasm_i_mtc0(&p, T0, C0_EPC);

	/* Set the ASID for the Guest Kernel */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
		  T0);
	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
	uasm_i_xori(&p, T0, T0, KSU_USER);
	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
	uasm_i_addiu(&p, T1, K1,
		     offsetof(struct kvm_vcpu_arch, guest_kernel_asid));
	/* else user */
	uasm_i_addiu(&p, T1, K1,
		     offsetof(struct kvm_vcpu_arch, guest_user_asid));
	uasm_l_kernel_asid(&l, p);
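
	/*
	 * The guest counts as kernel mode whenever Status.KSU != user or
	 * ERL/EXL is set, i.e. whenever the andi/xori result is non-zero.
	 * The guest_kernel_asid addiu sits in the bnez delay slot, so it
	 * always executes; in the user case the following addiu simply
	 * overwrites t1 with the guest_user_asid offset.
	 */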

	/* t1: contains the base of the ASID array, need to get the cpu id */
	/* smp_processor_id */
	UASM_i_LW(&p, T2, offsetof(struct thread_info, cpu), GP);
	uasm_i_sll(&p, T2, T2, 2);
	UASM_i_ADDU(&p, T3, T1, T2);
	UASM_i_LW(&p, K0, 0, T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/* x sizeof(struct cpuinfo_mips)/4 */
	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4);
	uasm_i_mul(&p, T2, T2, T3);

	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, AT, AT, T2);
	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
	uasm_i_and(&p, K0, K0, T2);
#else
	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif
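
	/*
	 * In the variable-width case, t2 already held cpu << 2, so
	 * multiplying by sizeof(struct cpuinfo_mips)/4 yields
	 * cpu * sizeof(struct cpuinfo_mips): the byte offset of
	 * cpu_data[cpu], whose asid_mask is then used to mask the ASID.
	 */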

	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
	uasm_i_ehb(&p);

	/* Disable RDHWR access */
	uasm_i_mtc0(&p, ZERO, C0_HWRENA);

	/* load the guest context from VCPU and return */
	for (i = 1; i < 32; ++i) {
		/* Guest k0/k1 loaded later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

	/* Restore hi/lo */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
	uasm_i_mtlo(&p, K0);

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Jump to guest */
	uasm_i_eret(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}
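
/*
 * The eret consumes the state set up above: it clears Status.EXL and
 * transfers control to the guest PC that was loaded into EPC, with the
 * guest's ASID live in EntryHi and k0/k1 already holding guest values.
 */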

/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr:	Address to start writing code.
 *
 * Assemble exception vector code for guest execution. The generated vector will
 * jump to the common exception handler generated by kvm_mips_build_exit().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr)
{
	u32 *p = addr;

	/* Save guest k0 */
	uasm_i_mtc0(&p, K0, C0_ERROREPC);
	uasm_i_ehb(&p);

	/* Get EBASE */
	uasm_i_mfc0(&p, K0, C0_EBASE);
	/* Get rid of CPUNum */
	uasm_i_srl(&p, K0, K0, 10);
	uasm_i_sll(&p, K0, K0, 10);
	/* Save k1 @ offset 0x3000 */
	UASM_i_SW(&p, K1, 0x3000, K0);

	/* Exception handler is installed @ offset 0x2000 */
	uasm_i_addiu(&p, K0, K0, 0x2000);
	/* Jump to the function */
	uasm_i_jr(&p, K0);
	uasm_i_nop(&p);

	return p;
}
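
/*
 * Layout relative to the guest EBASE implied by the code above: this
 * vector sits at one of the architectural exception offsets, the common
 * exit handler (kvm_mips_build_exit()) at +0x2000, and a single spill
 * slot for the guest k1 at +0x3000. Guest k0 is parked in ErrorEPC
 * instead of memory until kvm_mips_build_exit() copies both into the VCPU.
 */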

/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 */

	/* Get the VCPU pointer from DDATA_LO */
	uasm_i_mfc0(&p, K1, C0_DDATA_LO);
	uasm_i_addiu(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

	uasm_i_mflo(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);

	/* Finally save guest k0/k1 to VCPU */
	uasm_i_mfc0(&p, T0, C0_ERROREPC);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);

	/* Get GUEST k1 and save it in VCPU */
	uasm_i_addiu(&p, T1, ZERO, ~0x2ff);
	uasm_i_mfc0(&p, T0, C0_EBASE);
	uasm_i_and(&p, T0, T0, T1);
	UASM_i_LW(&p, T0, 0x3000, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
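
	/*
	 * The ~0x2ff mask strips the low bits of EBASE to recover the base
	 * address relative to which the exception vector stored the guest
	 * k1 at offset 0x3000 (see kvm_mips_build_exception()).
	 */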

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	uasm_i_mfc0(&p, A1, C0_DDATA_LO);
	uasm_i_move(&p, S1, A1);

	/* Restore run (vcpu->run) */
	UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
	/* Save pointer to run in s0, will be saved by the compiler */
	uasm_i_move(&p, S0, A0);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
	 * the exception
	 */
	uasm_i_mfc0(&p, K0, C0_EPC);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  K1);

	uasm_i_mfc0(&p, K0, C0_CAUSE);
	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	uasm_i_mfc0(&p, V0, C0_STATUS);

	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V0, AT);

	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	UASM_i_LA_mostly(&p, K0, (long)&ebase);
	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
	uasm_i_mtc0(&p, K0, C0_EBASE);

	/*
	 * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
	 * trigger FPE for pending exceptions.
	 */
	uasm_i_lui(&p, AT, ST0_CU1 >> 16);
	uasm_i_and(&p, V1, V0, AT);
	uasm_il_beqz(&p, &r, V1, label_fpu_1);
	uasm_i_nop(&p);
	uasm_i_cfc1(&p, T0, 31);
	uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31), K1);
	uasm_i_ctc1(&p, ZERO, 31);
	uasm_l_fpu_1(&l, p);

#ifdef CONFIG_CPU_HAS_MSA
	/*
	 * If MSA is enabled, save MSACSR and clear it so that later
	 * instructions don't trigger MSAFPE for pending exceptions.
	 */
	uasm_i_mfc0(&p, T0, C0_CONFIG3);
	uasm_i_ext(&p, T0, T0, 28, 1); /* MIPS_CONF3_MSAP */
	uasm_il_beqz(&p, &r, T0, label_msa_1);
	uasm_i_nop(&p);
	uasm_i_mfc0(&p, T0, C0_CONFIG5);
	uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
	uasm_il_beqz(&p, &r, T0, label_msa_1);
	uasm_i_nop(&p);
	uasm_i_cfcmsa(&p, T0, MSA_CSR);
	uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
		  K1);
	uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
	uasm_l_msa_1(&l, p);
#endif

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, V0, V0, AT);
	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
	uasm_i_or(&p, V0, V0, AT);
	uasm_i_mtc0(&p, V0, C0_STATUS);
	uasm_i_ehb(&p);

	/* Load up host GP */
	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Saved host state */
	uasm_i_addiu(&p, SP, SP, -(int)sizeof(struct pt_regs));

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host DDATA_LO */
	UASM_i_LW(&p, K0, offsetof(struct pt_regs, cp0_epc), SP);
	uasm_i_mtc0(&p, K0, C0_DDATA_LO);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);
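
	/*
	 * hwrena caches the host's HWREna value. The guest ran with RDHWR
	 * access disabled (see kvm_mips_build_enter_guest()), so this write
	 * re-enables user-mode RDHWR (e.g. reads of the TLS register) for
	 * the host.
	 */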

	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
	uasm_i_addiu(&p, SP, SP, -CALLFRAME_SIZ);

	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}

/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Return from handler, make sure interrupts are disabled */
	uasm_i_di(&p, ZERO);
	uasm_i_ehb(&p);

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */
	uasm_i_move(&p, K1, S1);
	uasm_i_addiu(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc) or resuming the guest
	 */
	uasm_i_andi(&p, T0, V0, RESUME_HOST);
	uasm_il_bnez(&p, &r, T0, label_return_to_host);
	uasm_i_nop(&p);

	p = kvm_mips_build_ret_to_guest(p);

	uasm_l_return_to_host(&l, p);
	p = kvm_mips_build_ret_to_host(p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}
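
/*
 * The return value convention assumed here: kvm_mips_handle_exit() sets
 * the RESUME_HOST bit when the exit must be completed by the host, and
 * otherwise indicates that the guest can be resumed; the error code for
 * the host lives in the upper bits and is recovered in ret_to_host by an
 * arithmetic shift right of 2 (see the comment there).
 */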

/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
	u32 *p = addr;

	/* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
	uasm_i_mtc0(&p, S1, C0_DDATA_LO);

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);

	/* Switch EBASE back to the one used by KVM */
	uasm_i_mfc0(&p, V1, C0_STATUS);
	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V1, AT);
	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);
	uasm_i_mtc0(&p, T0, C0_EBASE);

	/* Setup status register for running guest in UM */
	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
	UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX));
	uasm_i_and(&p, V1, V1, AT);
	uasm_i_mtc0(&p, V1, C0_STATUS);
	uasm_i_ehb(&p);

	p = kvm_mips_build_enter_guest(p);

	return p;
}

/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/* EBASE is already pointing to Linux */
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
	uasm_i_addiu(&p, K1, K1, -(int)sizeof(struct pt_regs));

	/* Restore host DDATA_LO */
	UASM_i_LW(&p, K0, offsetof(struct pt_regs, cp0_epc), K1);
	uasm_i_mtc0(&p, K0, C0_DDATA_LO);

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	uasm_i_sra(&p, K0, V0, 2);
	uasm_i_move(&p, V0, K0);

	/* Load context saved on the host stack */
	for (i = 16; i < 31; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Restore hi/lo */
	UASM_i_LW(&p, K0, offsetof(struct pt_regs, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct pt_regs, lo), K1);
	uasm_i_mtlo(&p, K0);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Restore RA, which is the address we will return to */
	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
	uasm_i_jr(&p, RA);
	uasm_i_nop(&p);

	return p;
}
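
/*
 * The jr above returns through the ra that kvm_mips_build_vcpu_run() saved
 * in regs[31] on the host stack, so control ends up back at the original
 * caller of the generated vcpu_run() with the recovered error code in v0.
 */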