/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>
#include <linux/random.h>

#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"
/*
 * Compute the return address and emulate the branch, if required.
 * This function should only be called when the vcpu is in a branch delay slot.
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
				     unsigned long instpc)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc = KVM_INVALID_INST;

	if (epc & 3)
		goto unaligned;
	/* Read the instruction */
	insn.word = kvm_get_inst((u32 *) epc, vcpu);

	if (insn.word == KVM_INVALID_INST)
		return KVM_INVALID_INST;
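
	/*
	 * For the conditional branches below, a taken branch resolves to
	 * epc + 4 + (sign-extended 16-bit offset << 2), i.e. relative to the
	 * delay slot address, while a not-taken branch falls through to
	 * epc + 8, skipping over the delay slot instruction.
	 */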
	switch (insn.i_format.opcode) {
		/* jr and jalr are in r_format format. */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bposge32_op:
			if (!cpu_has_dsp)
				goto sigill;

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		}
		break;

		/* These are unconditional and in j_format. */
	case jal_op:
		arch->gprs[31] = instpc + 8;
	case j_op:
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

		/* These are conditional and in i_format. */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:	/* not really i_format */
	case blezl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:
	case bgtzl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

		/* And now the FPA/cp1 branch instructions. */
	case cop1_op:
		kvm_err("%s: unsupported cop1_op\n", __func__);
		break;
	}

	return nextpc;

unaligned:
	kvm_err("%s: unaligned epc\n", __func__);
	return nextpc;

sigill:
	kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
	return nextpc;
}
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
	unsigned long branch_pc;
	enum emulation_result er = EMULATE_DONE;

	if (cause & CAUSEF_BD) {
		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
		if (branch_pc == KVM_INVALID_INST) {
			er = EMULATE_FAIL;
		} else {
			vcpu->arch.pc = branch_pc;
			kvm_debug("BD update_pc(): New PC: %#lx\n",
				  vcpu->arch.pc);
		}
	} else
		vcpu->arch.pc += 4;

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return er;
}
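
/*
 * Callers that can still fail after calling update_pc() (for example
 * kvm_mips_emulate_CP0() below) snapshot vcpu->arch.pc beforehand and restore
 * it on EMULATE_FAIL, since by that point the PC has already been advanced.
 */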
/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case CP0_Count timer is running).
 */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}
/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
	s64 now_ns, periods;
	u64 delta;

	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.count_dyn_bias;

	if (delta >= vcpu->arch.count_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.count_period);
		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.count_dyn_bias;
	}

	/*
	 * We've ensured that:
	 *   delta < count_period
	 *
	 * Therefore the intermediate delta * count_hz will never overflow
	 * since at the boundary condition:
	 *   delta = count_period
	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
	 *   delta * count_hz = NSEC_PER_SEC * 2^32
	 */
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}
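
/*
 * Worked example (a sketch, assuming the 100 MHz default set up by
 * kvm_mips_init_count() below): count_period = 10^9 * 2^32 / 10^8 ns, i.e.
 * roughly 42.95 seconds, so the intermediate delta * count_hz stays below
 * 2^32 * 10^9 and fits in the u64 before div_u64() scales it back down to a
 * 32-bit count value.
 */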
/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:	Virtual CPU.
 *
 * Get the effective monotonic ktime. This is usually a straightforward
 * ktime_get(), except when the master disable bit is set in count_ctl, in
 * which case it is count_resume, i.e. the time that the count was disabled.
 *
 * Returns:	Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		return vcpu->arch.count_resume;

	return ktime_get();
}
/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register value at time @now, handling
 * any pending timer interrupt that hasn't been handled yet.
 *
 * Returns:	The current value of the guest CP0_Count register.
 */
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t expires, threshold;
	u32 count, compare;
	int running;

	/* Calculate the biased and scaled guest CP0_Count */
	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
	compare = kvm_read_c0_guest_compare(cop0);
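
	/*
	 * Note: count and compare both wrap modulo 2^32, so the signed
	 * difference below orders them wrap-safely; count is treated as
	 * having reached compare once it is within half the count space
	 * (2^31) past it.
	 */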
	/*
	 * Find whether CP0_Count has reached the closest timer interrupt. If
	 * not, we shouldn't inject it.
	 */
	if ((s32)(count - compare) < 0)
		return count;

	/*
	 * The CP0_Count we're going to return has already reached the closest
	 * timer interrupt. Quickly check if it really is a new interrupt by
	 * looking at whether the interval until the hrtimer expiry time is
	 * less than 1/4 of the timer period.
	 */
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
	if (ktime_before(expires, threshold)) {
		/*
		 * Cancel it while we handle it so there's no chance of
		 * interference with the timeout handler.
		 */
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

		/* Nothing should be waiting on the timeout */
		kvm_mips_callbacks->queue_timer_int(vcpu);

		/*
		 * Restart the timer if it was running based on the expiry time
		 * we read, so that we don't push it back 2 periods.
		 */
		if (running) {
			expires = ktime_add_ns(expires,
					       vcpu->arch.count_period);
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
				      HRTIMER_MODE_ABS);
		}
	}

	return count;
}
/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the
 * timer is stopped.
 *
 * Returns:	The current guest CP0_Count value.
 */
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* If count disabled just read static copy of count */
	if (kvm_mips_count_disabled(vcpu))
		return kvm_read_c0_guest_count(cop0);

	return kvm_mips_read_count_running(vcpu, ktime_get());
}
/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The ktime at the point of freeze.
 */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
	ktime_t now;

	/* Stop hrtimer before finding time */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	now = ktime_get();

	/* Find count at this point and handle pending hrtimer */
	*count = kvm_mips_read_count_running(vcpu, now);

	return now;
}
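
/*
 * Typical usage pattern (a sketch; kvm_mips_set_count_hz() below is a real
 * caller):
 *
 *	now = kvm_mips_freeze_hrtimer(vcpu, &count);
 *	... change timer parameters ...
 *	kvm_mips_resume_hrtimer(vcpu, now, count);
 */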
/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at point of resume.
 * @count:	CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case should
 * already have been handled when the hrtimer was frozen.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, u32 count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 compare;
	u64 delta;
	ktime_t expire;

	/* Calculate timeout (wrap 0 to 2^32) */
	compare = kvm_read_c0_guest_compare(cop0);
	delta = (u64)(u32)(compare - count - 1) + 1;
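	/*
	 * The expression above maps compare == count + 1 to a delta of 1 tick
	 * and compare == count to a full 2^32 ticks, so a CP0_Compare value
	 * equal to the current count waits a whole count period rather than
	 * firing immediately.
	 */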
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}
/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t now;

	/* Calculate bias */
	now = kvm_mips_count_time(vcpu);
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	if (kvm_mips_count_disabled(vcpu))
		/* The timer's disabled, adjust the static count */
		kvm_write_c0_guest_count(cop0, count);
	else
		/* Update timeout */
		kvm_mips_resume_hrtimer(vcpu, now, count);
}
/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and
 * set it going if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
{
	/* 100 MHz */
	vcpu->arch.count_hz = 100 * 1000 * 1000;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
					  vcpu->arch.count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Starting at count 0 */
	kvm_mips_write_count(vcpu, 0);
}
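
/*
 * The 100MHz default is an arbitrary but sensible choice; userland can retune
 * the frequency afterwards through the KVM_REG_MIPS_COUNT_HZ register
 * interface, which lands in kvm_mips_set_count_hz() below.
 */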
/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:	-EINVAL if @count_hz is out of range.
 *		0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	ktime_t now;
	u32 count;

	/* ensure the frequency is in a sensible range... */
	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
		return -EINVAL;
	/* ... and has actually changed */
	if (vcpu->arch.count_hz == count_hz)
		return 0;

	/* Safely freeze timer so we can keep it continuous */
	dc = kvm_mips_count_disabled(vcpu);
	if (dc) {
		now = kvm_mips_count_time(vcpu);
		count = kvm_read_c0_guest_count(cop0);
	} else {
		now = kvm_mips_freeze_hrtimer(vcpu, &count);
	}

	/* Update the frequency */
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Calculate adjusted bias so dynamic count is unchanged */
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	/* Update and resume hrtimer */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
	return 0;
}
/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 * @ack:	Whether to acknowledge timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout.
 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
 * any pending timer interrupt is preserved.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	u32 old_compare = kvm_read_c0_guest_compare(cop0);
	ktime_t now;
	u32 count;

	/* if unchanged, must just be an ack */
	if (old_compare == compare) {
		if (!ack)
			return;
		kvm_mips_callbacks->dequeue_timer_int(vcpu);
		kvm_write_c0_guest_compare(cop0, compare);
		return;
	}

	/* freeze_hrtimer() takes care of timer interrupts <= count */
	dc = kvm_mips_count_disabled(vcpu);
	if (!dc)
		now = kvm_mips_freeze_hrtimer(vcpu, &count);

	if (ack)
		kvm_mips_callbacks->dequeue_timer_int(vcpu);

	kvm_write_c0_guest_compare(cop0, compare);

	/* resume_hrtimer() takes care of timer interrupts > count */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
}
/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:	The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 count;
	ktime_t now;

	/* Stop hrtimer */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	/* Set the static count from the dynamic count, handling pending TI */
	now = ktime_get();
	count = kvm_mips_read_count_running(vcpu, now);
	kvm_write_c0_guest_count(cop0, count);

	return now;
}
/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		kvm_mips_count_disable(vcpu);
}
/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 count;

	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

	/*
	 * Set the dynamic count to match the static count.
	 * This starts the hrtimer if count_ctl.DC allows it.
	 * Otherwise it conveniently updates the biases.
	 */
	count = kvm_read_c0_guest_count(cop0);
	kvm_mips_write_count(vcpu, count);
}
/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:	Virtual CPU.
 * @count_ctl:	Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:	-EINVAL if reserved bits are set.
 *		0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
	s64 delta;
	ktime_t expire, now;
	u32 count, compare;

	/* Only allow defined bits to be changed */
	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
		return -EINVAL;

	/* Apply new value */
	vcpu->arch.count_ctl = count_ctl;

	/* Master CP0_Count disable */
	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
		/* Is CP0_Cause.DC already disabling CP0_Count? */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
				/* Just record the current time */
				vcpu->arch.count_resume = ktime_get();
		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
			/* disable timer and record current time */
			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
		} else {
			/*
			 * Calculate timeout relative to static count at resume
			 * time (wrap 0 to 2^32).
			 */
			count = kvm_read_c0_guest_count(cop0);
			compare = kvm_read_c0_guest_compare(cop0);
			delta = (u64)(u32)(compare - count - 1) + 1;
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
			expire = ktime_add_ns(vcpu->arch.count_resume, delta);

			/* Handle pending interrupt */
			now = ktime_get();
			if (ktime_compare(now, expire) >= 0)
				/* Nothing should be waiting on the timeout */
				kvm_mips_callbacks->queue_timer_int(vcpu);

			/* Resume hrtimer without changing bias */
			count = kvm_mips_read_count_running(vcpu, now);
			kvm_mips_resume_hrtimer(vcpu, now, count);
		}
	}

	return 0;
}
/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:		Virtual CPU.
 * @count_resume:	Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:	-EINVAL if out of valid range (0..now).
 *		0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
	/*
	 * It doesn't make sense for the resume time to be in the future, as it
	 * would be possible for the next interrupt to be more than a full
	 * period in the future.
	 */
	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
		return -EINVAL;

	vcpu->arch.count_resume = ns_to_ktime(count_resume);
	return 0;
}
/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 *
 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
	/* Add the Count period to the current expiry time */
	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
			       vcpu->arch.count_period);
	return HRTIMER_RESTART;
}
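
/*
 * Returning HRTIMER_RESTART after hrtimer_add_expires_ns() makes the timer
 * periodic: the hrtimer core re-arms it at the bumped expiry rather than
 * requiring a fresh hrtimer_start() from the timeout path.
 */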
enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else {
		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
			vcpu->arch.pc);
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
	if (!vcpu->arch.pending_exceptions) {
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable, then definitely go off to user space to
		 * check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return EMULATE_DONE;
}
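
/*
 * A sketch of the flow above: kvm_vcpu_block() sleeps until the vcpu becomes
 * runnable again; if the generic KVM code requested an unhalt, the
 * KVM_EXIT_IRQ_WINDOW_OPEN exit reason bounces to userland so it can check
 * for pending I/O interrupts before the guest is re-entered.
 */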
/*
 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
 * we can catch this, if things ever change
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long pc = vcpu->arch.pc;

	kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
	return EMULATE_FAIL;
}
/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	struct kvm_mips_tlb *tlb = NULL;
	unsigned long pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		kvm_debug("%s: illegal index: %d\n", __func__, index);
		kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
			  pc, index, kvm_read_c0_guest_entryhi(cop0),
			  kvm_read_c0_guest_entrylo0(cop0),
			  kvm_read_c0_guest_entrylo1(cop0),
			  kvm_read_c0_guest_pagemask(cop0));
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
	}

	tlb = &vcpu->arch.guest_tlb[index];

	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0),
		  kvm_read_c0_guest_pagemask(cop0));

	return EMULATE_DONE;
}
/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb *tlb = NULL;
	unsigned long pc = vcpu->arch.pc;
	int index;

	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);

	tlb = &vcpu->arch.guest_tlb[index];

	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0));

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	long entryhi = kvm_read_c0_guest_entryhi(cop0);
	unsigned long pc = vcpu->arch.pc;
	int index = -1;

	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

	kvm_write_c0_guest_index(cop0, index);

	kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
		  index);

	return EMULATE_DONE;
}
/**
 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config1 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}
/**
 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config3 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config4 is optional */
	unsigned int mask = MIPS_CONF_M;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}
/**
 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config4 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config5 is optional */
	return MIPS_CONF_M;
}
/**
 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config5 CP0
 * register, by the guest itself.
 */
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE;
		/* We don't support UFR or UFE */
	}

	return mask;
}
enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
					   u32 *opc, u32 cause,
					   struct kvm_run *run,
					   struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 rt, rd, sel;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	if (inst.co_format.co) {
		switch (inst.co_format.func) {
		case tlbr_op:	/*  Read indexed TLB entry  */
			er = kvm_mips_emul_tlbr(vcpu);
			break;
		case tlbwi_op:	/*  Write indexed  */
			er = kvm_mips_emul_tlbwi(vcpu);
			break;
		case tlbwr_op:	/*  Write random  */
			er = kvm_mips_emul_tlbwr(vcpu);
			break;
		case tlbp_op:	/* TLB Probe */
			er = kvm_mips_emul_tlbp(vcpu);
			break;
		case rfe_op:
			kvm_err("!!!COP0_RFE!!!\n");
			break;
		case eret_op:
			er = kvm_mips_emul_eret(vcpu);
			goto dont_update_pc;
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		}
	} else {
		rt = inst.c0r_format.rt;
		rd = inst.c0r_format.rd;
		sel = inst.c0r_format.sel;

		switch (inst.c0r_format.rs) {
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			/* Get reg */
			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
				vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			} else {
				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			}

			trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
				      KVM_TRACE_COP0(rd, sel),
				      vcpu->arch.gprs[rt]);
			break;

		case dmfc_op:
			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

			trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
				      KVM_TRACE_COP0(rd, sel),
				      vcpu->arch.gprs[rt]);
			break;

		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
				      KVM_TRACE_COP0(rd, sel),
				      vcpu->arch.gprs[rt]);

			if ((rd == MIPS_CP0_TLB_INDEX)
			    && (vcpu->arch.gprs[rt] >=
				KVM_MIPS_GUEST_TLB_SIZE)) {
				kvm_err("Invalid TLB Index: %ld",
					vcpu->arch.gprs[rt]);
				er = EMULATE_FAIL;
				break;
			}
#define C0_EBASE_CORE_MASK 0xff
			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
				/* Preserve CORE number */
				kvm_change_c0_guest_ebase(cop0,
							  ~(C0_EBASE_CORE_MASK),
							  vcpu->arch.gprs[rt]);
				kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
					kvm_read_c0_guest_ebase(cop0));
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				u32 nasid =
					vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
				    ((kvm_read_c0_guest_entryhi(cop0) &
				      KVM_ENTRYHI_ASID) != nasid)) {
					trace_kvm_asid_change(vcpu,
						kvm_read_c0_guest_entryhi(cop0)
							& KVM_ENTRYHI_ASID,
						nasid);

					/* Blow away the shadow host TLBs */
					kvm_mips_flush_host_tlb(1);
				}
				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
			}
			/* Are we writing to COUNT */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
				goto done;
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
						       true);
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				unsigned int old_val, val, change;

				old_val = kvm_read_c0_guest_status(cop0);
				val = vcpu->arch.gprs[rt];
				change = val ^ old_val;

				/* Make sure that the NMI bit is never set */
				val &= ~ST0_NMI;

				/*
				 * Don't allow CU1 or FR to be set unless FPU
				 * capability enabled and exists in guest
				 * configuration.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					val &= ~(ST0_CU1 | ST0_FR);

				/*
				 * Also don't allow FR to be set if host doesn't
				 * support it.
				 */
				if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
					val &= ~ST0_FR;

				/* Handle changes in FPU mode */
				preempt_disable();

				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				if (change & ST0_FR)
					kvm_drop_fpu(vcpu);

				/*
				 * If MSA state is already live, it is undefined
				 * how it interacts with FR=0 FPU state, and we
				 * don't want to hit reserved instruction
				 * exceptions trying to save the MSA state later
				 * when CU=1 && FR=1, so play it safe and save
				 * it first.
				 */
				if (change & ST0_CU1 && !(val & ST0_FR) &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
					kvm_lose_fpu(vcpu);

				/*
				 * Propagate CU1 (FPU enable) changes
				 * immediately if the FPU context is already
				 * loaded. When disabling we leave the context
				 * loaded so it can be quickly enabled again in
				 * the near future.
				 */
				if (change & ST0_CU1 &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
					change_c0_status(ST0_CU1, val);

				preempt_enable();

				kvm_write_c0_guest_status(cop0, val);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				/*
				 * If FPU present, we need CU1/FR bits to take
				 * effect fairly soon.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
				unsigned int old_val, val, change, wrmask;

				old_val = kvm_read_c0_guest_config5(cop0);
				val = vcpu->arch.gprs[rt];

				/* Only a few bits are writable in Config5 */
				wrmask = kvm_mips_config5_wrmask(vcpu);
				change = (val ^ old_val) & wrmask;
				val = old_val ^ change;

				/* Handle changes in FPU/MSA modes */
				preempt_disable();

				/*
				 * Propagate FRE changes immediately if the FPU
				 * context is already loaded.
				 */
				if (change & MIPS_CONF5_FRE &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
					change_c0_config5(MIPS_CONF5_FRE, val);

				/*
				 * Propagate MSAEn changes immediately if the
				 * MSA context is already loaded. When disabling
				 * we leave the context loaded so it can be
				 * quickly enabled again in the near future.
				 */
				if (change & MIPS_CONF5_MSAEN &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
					change_c0_config5(MIPS_CONF5_MSAEN,
							  val);

				preempt_enable();

				kvm_write_c0_guest_config5(cop0, val);
			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
				u32 old_cause, new_cause;

				old_cause = kvm_read_c0_guest_cause(cop0);
				new_cause = vcpu->arch.gprs[rt];
				/* Update R/W bits */
				kvm_change_c0_guest_cause(cop0, 0x08800300,
							  new_cause);
				/* DC bit enabling/disabling timer? */
				if ((old_cause ^ new_cause) & CAUSEF_DC) {
					if (new_cause & CAUSEF_DC)
						kvm_mips_count_disable_cause(vcpu);
					else
						kvm_mips_count_enable_cause(vcpu);
				}
			} else {
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			}
			break;

		case dmtc_op:
			kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
				vcpu->arch.pc, rt, rd, sel);
			trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
				      KVM_TRACE_COP0(rd, sel),
				      vcpu->arch.gprs[rt]);
			er = EMULATE_FAIL;
			break;

		case mfmc0_op:
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
			if (rt != 0)
				vcpu->arch.gprs[rt] =
					kvm_read_c0_guest_status(cop0);
			/* EI */
			if (inst.mfmc0_format.sc) {
				kvm_debug("[%#lx] mfmc0_op: EI\n",
					  vcpu->arch.pc);
				kvm_set_c0_guest_status(cop0, ST0_IE);
			} else {
				kvm_debug("[%#lx] mfmc0_op: DI\n",
					  vcpu->arch.pc);
				kvm_clear_c0_guest_status(cop0, ST0_IE);
			}

			break;

		case wrpgpr_op:
			{
				u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
				u32 pss =
					(cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
				/*
				 * We don't support any shadow register sets, so
				 * SRSCtl[PSS] == SRSCtl[CSS] = 0
				 */
				if (css || pss) {
					er = EMULATE_FAIL;
					break;
				}

				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
					  vcpu->arch.gprs[rt]);
				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
			}
			break;
		default:
			kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
				vcpu->arch.pc, inst.c0r_format.rs);
			er = EMULATE_FAIL;
			break;
		}
	}

done:
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

dont_update_pc:
	/*
	 * This is for special instructions whose emulation
	 * updates the PC, so do not overwrite the PC under
	 * any circumstances
	 */

	return er;
}
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	u32 rt;
	u32 bytes;
	void *data = run->mmio.data;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = inst.i_format.rt;

	switch (inst.i_format.opcode) {
	case sb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u8 *) data = vcpu->arch.gprs[rt];
		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
			  *(u8 *) data);

		break;

	case sw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u32 *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *) data);
		break;

	case sh_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u16 *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *) data);
		break;

	default:
		kvm_err("Store not yet supported (inst=0x%08x)\n",
			inst.word);
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}
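
/*
 * For all three store cases above the device access itself happens in
 * userland: the run->mmio fields describe the access, EMULATE_DO_MMIO
 * propagates out as a KVM_EXIT_MMIO, and the VMM performs the write before
 * re-entering the guest.
 */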
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause, struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	u32 op, rt;
	u32 bytes;

	rt = inst.i_format.rt;
	op = inst.i_format.opcode;

	vcpu->arch.pending_load_cause = cause;
	vcpu->arch.io_gpr = rt;

	switch (op) {
	case lw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		break;

	case lh_op:
	case lhu_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;

		if (op == lh_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	case lbu_op:
	case lb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lb_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	default:
		kvm_err("Load not yet supported (inst=0x%08x)\n",
			inst.word);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
					     u32 *opc, u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 cache, op_inst, op, base;
	s16 offset;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = inst.i_format.rs;
	op_inst = inst.i_format.rt;
	offset = inst.i_format.simmediate;
	cache = op_inst & CacheOp_Cache;
	op = op_inst & CacheOp_Op;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/*
	 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
	 * invalidate the caches entirely by stepping through all the
	 * ways/indexes
	 */
	if (op == Index_Writeback_Inv) {
		kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
			  arch->gprs[base], offset);

		if (cache == Cache_D)
			r4k_blast_dcache();
		else if (cache == Cache_I)
			r4k_blast_icache();
		else {
			kvm_err("%s: unsupported CACHE INDEX operation\n",
				__func__);
			return EMULATE_FAIL;
		}

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
		goto done;
	}

	preempt_disable();
	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		int index;

		/* If an entry already exists then skip */
		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
			goto skip_fault;

		/*
		 * If address not in the guest TLB, then give the guest a fault,
		 * the resulting handler will do the right thing
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
						  (kvm_read_c0_guest_entryhi
						   (cop0) & KVM_ENTRYHI_ASID));

		if (index < 0) {
			vcpu->arch.host_cp0_badvaddr = va;
			vcpu->arch.pc = curr_pc;
			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
							 vcpu);
			preempt_enable();
			goto dont_update_pc;
		} else {
			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
			/*
			 * Check if the entry is valid, if not then setup a TLB
			 * invalid exception to the guest
			 */
			if (!TLB_IS_VALID(*tlb, va)) {
				vcpu->arch.host_cp0_badvaddr = va;
				vcpu->arch.pc = curr_pc;
				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
								run, vcpu);
				preempt_enable();
				goto dont_update_pc;
			} else {
				/*
				 * We fault an entry from the guest tlb to the
				 * shadow host TLB
				 */
				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
			}
		}
	} else {
		kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto dont_update_pc;
	}

skip_fault:
	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
	if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
		flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/*
		 * Replace the CACHE instruction, with a SYNCI, not the same,
		 * but avoids a trap
		 */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else if (op_inst == Hit_Invalidate_I) {
		flush_dcache_line(va);
		flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction, with a SYNCI */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else {
		kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
	}

	preempt_enable();
done:
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

dont_update_pc:
	/*
	 * This is for exceptions whose emulation updates the PC, so do not
	 * overwrite the PC under any circumstances
	 */

	return er;
}
enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
{
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst.word = kvm_get_inst(opc, vcpu);

	switch (inst.r_format.opcode) {
	case cop0_op:
		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
		break;
	case sb_op:
	case sh_op:
	case sw_op:
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		break;
	case lb_op:
	case lbu_op:
	case lhu_op:
	case lh_op:
	case lw_op:
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		break;

	case cache_op:
		++vcpu->stat.cache_exits;
		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
		break;

	default:
		kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
			inst.word);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_syscall(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_SYS << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) &
				 KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;

	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBL << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
						 u32 *opc,
						 struct kvm_run *run,
						 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi =
		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
		(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBL << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) &
				 KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
						 u32 *opc,
						 struct kvm_run *run,
						 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) &
				 KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}
/* TLBMOD: store into address matching TLB with Dirty bit off */
enum emulation_result kvm_mips_handle_tlbmod(u32 cause, u32 *opc,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
#ifdef DEBUG
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) &
				 KVM_ENTRYHI_ASID);
	int index;

	/* If address not in the guest TLB, then we are in trouble */
	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
	if (index < 0) {
		/* XXXKYMA Invalidate and retry */
		kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
		kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
			__func__, entryhi);
		kvm_mips_dump_guest_tlbs(vcpu);
		kvm_mips_dump_host_tlbs();
		return EMULATE_FAIL;
	}
#endif

	er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
	return er;
}
enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
					      u32 *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) &
				 KVM_ENTRYHI_ASID);
	struct kvm_vcpu_arch *arch = &vcpu->arch;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_MOD << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
	}

	arch->pc = KVM_GUEST_KSEG0 + 0x180;

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_CPU << CAUSEB_EXCCODE));
	kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
					      u32 *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_RI << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver RI when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
					      u32 *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_BP << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver BP when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
						u32 *opc,
						struct kvm_run *run,
						struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_TR << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver TRAP when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_MSAFPE << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_FPE << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver FPE when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_MSADIS << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver MSADIS when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
					 struct kvm_run *run,
					 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;
	union mips_instruction inst;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst.word = kvm_get_inst(opc, vcpu);

	if (inst.word == KVM_INVALID_INST) {
		kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
		return EMULATE_FAIL;
	}

	if (inst.r_format.opcode == spec3_op &&
	    inst.r_format.func == rdhwr_op) {
		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
		int rd = inst.r_format.rd;
		int rt = inst.r_format.rt;
		int sel = inst.r_format.re & 0x7;

		/* If usermode, check RDHWR rd is allowed by guest HWREna */
		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
				  rd, opc);
			goto emulate_ri;
		}
		switch (rd) {
		case MIPS_HWR_CPUNUM:		/* CPU number */
			arch->gprs[rt] = 0;
			break;
		case MIPS_HWR_SYNCISTEP:	/* SYNCI length */
			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
					     current_cpu_data.icache.linesz);
			break;
		case MIPS_HWR_CC:		/* Read count register */
			arch->gprs[rt] = kvm_mips_read_count(vcpu);
			break;
		case MIPS_HWR_CCRES:		/* Count register resolution */
			switch (current_cpu_data.cputype) {
			case CPU_20KC:
			case CPU_25KF:
				arch->gprs[rt] = 1;
				break;
			default:
				arch->gprs[rt] = 2;
			}
			break;
		case MIPS_HWR_ULR:		/* Read UserLocal register */
			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
			break;

		default:
			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
			goto emulate_ri;
		}

		trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
			      vcpu->arch.gprs[rt]);
	} else {
		kvm_debug("Emulate RI not supported @ %p: %#x\n",
			  opc, inst.word);
		goto emulate_ri;
	}

	return EMULATE_DONE;

emulate_ri:
	/*
	 * Rollback PC (if in branch delay slot then the PC already points to
	 * branch target), and pass the RI exception to the guest OS.
	 */
	vcpu->arch.pc = curr_pc;
	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
}
enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
						  struct kvm_run *run)
{
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;

	if (run->mmio.len > sizeof(*gpr)) {
		kvm_err("Bad MMIO length: %d", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
	if (er == EMULATE_FAIL)
		return er;

	switch (run->mmio.len) {
	case 4:
		*gpr = *(s32 *) run->mmio.data;
		break;

	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s16 *) run->mmio.data;
		else
			*gpr = *(u16 *)run->mmio.data;

		break;
	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s8 *) run->mmio.data;
		else
			*gpr = *(u8 *) run->mmio.data;
		break;
	}

	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
		kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
			  vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
			  vcpu->mmio_needed);

done:
	return er;
}
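
/*
 * Note that vcpu->mmio_needed doubles as a sign-extension marker here:
 * kvm_mips_emulate_load() sets it to 2 for the signed narrow loads (lb, lh)
 * and 1 for the unsigned ones (lbu, lhu), which is why the 2 and 1 byte
 * cases above choose between a signed and an unsigned cast.
 */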
static enum emulation_result kvm_mips_emulate_exc(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (exccode << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
			  exccode, kvm_read_c0_guest_epc(cop0),
			  kvm_read_c0_guest_badvaddr(cop0));
	} else {
		kvm_err("Trying to deliver EXC when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
kvm_mips_check_privilege(u32 cause
,
2432 struct kvm_run
*run
,
2433 struct kvm_vcpu
*vcpu
)
2435 enum emulation_result er
= EMULATE_DONE
;
2436 u32 exccode
= (cause
>> CAUSEB_EXCCODE
) & 0x1f;
2437 unsigned long badvaddr
= vcpu
->arch
.host_cp0_badvaddr
;
2439 int usermode
= !KVM_GUEST_KERNEL_MODE(vcpu
);
2448 case EXCCODE_MSAFPE
:
2450 case EXCCODE_MSADIS
:
2454 if (((cause
& CAUSEF_CE
) >> CAUSEB_CE
) == 0)
2455 er
= EMULATE_PRIV_FAIL
;
2463 * We we are accessing Guest kernel space, then send an
2464 * address error exception to the guest
2466 if (badvaddr
>= (unsigned long) KVM_GUEST_KSEG0
) {
2467 kvm_debug("%s: LD MISS @ %#lx\n", __func__
,
2470 cause
|= (EXCCODE_ADEL
<< CAUSEB_EXCCODE
);
2471 er
= EMULATE_PRIV_FAIL
;
2477 * We we are accessing Guest kernel space, then send an
2478 * address error exception to the guest
2480 if (badvaddr
>= (unsigned long) KVM_GUEST_KSEG0
) {
2481 kvm_debug("%s: ST MISS @ %#lx\n", __func__
,
2484 cause
|= (EXCCODE_ADES
<< CAUSEB_EXCCODE
);
2485 er
= EMULATE_PRIV_FAIL
;
2490 kvm_debug("%s: address error ST @ %#lx\n", __func__
,
2492 if ((badvaddr
& PAGE_MASK
) == KVM_GUEST_COMMPAGE_ADDR
) {
2494 cause
|= (EXCCODE_TLBS
<< CAUSEB_EXCCODE
);
2496 er
= EMULATE_PRIV_FAIL
;
2499 kvm_debug("%s: address error LD @ %#lx\n", __func__
,
2501 if ((badvaddr
& PAGE_MASK
) == KVM_GUEST_COMMPAGE_ADDR
) {
2503 cause
|= (EXCCODE_TLBL
<< CAUSEB_EXCCODE
);
2505 er
= EMULATE_PRIV_FAIL
;
2508 er
= EMULATE_PRIV_FAIL
;
2513 if (er
== EMULATE_PRIV_FAIL
)
2514 kvm_mips_emulate_exc(cause
, opc
, run
, vcpu
);
/*
 * User Address (UA) fault, this could happen if
 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
 *     case we pass on the fault to the guest kernel and let it handle it.
 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
 *     case we inject the TLB from the Guest TLB into the shadow host TLB
 */
enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
					      u32 *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long va = vcpu->arch.host_cp0_badvaddr;
	int index;

	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
		  vcpu->arch.host_cp0_badvaddr);

	/*
	 * KVM would not have got the exception if this entry was valid in the
	 * shadow host TLB. Check the Guest TLB, if the entry is not there then
	 * send the guest an exception. The guest exc handler should then inject
	 * an entry into the guest TLB.
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu,
		      (va & VPN2_MASK) |
		      (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
		       KVM_ENTRYHI_ASID));
	if (index < 0) {
		if (exccode == EXCCODE_TLBL) {
			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
		} else if (exccode == EXCCODE_TLBS) {
			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
		} else {
			kvm_err("%s: invalid exc code: %d\n", __func__,
				exccode);
			er = EMULATE_FAIL;
		}
	} else {
		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];

		/*
		 * Check if the entry is valid, if not then setup a TLB invalid
		 * exception to the guest
		 */
		if (!TLB_IS_VALID(*tlb, va)) {
			if (exccode == EXCCODE_TLBL) {
				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
								vcpu);
			} else if (exccode == EXCCODE_TLBS) {
				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
								vcpu);
			} else {
				kvm_err("%s: invalid exc code: %d\n", __func__,
					exccode);
				er = EMULATE_FAIL;
			}
		} else {
			kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
				  tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
			/*
			 * OK we have a Guest TLB entry, now inject it into the
			 * shadow host TLB
			 */
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
		}
	}

	return er;
}