/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>
#include <linux/random.h>

#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"
/*
 * Compute the return address and do emulate branch simulation, if required.
 * This function should be called only in branch delay slot active.
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
				     unsigned long instpc)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc = KVM_INVALID_INST;

	if (epc & 3)
		goto unaligned;

	/* Read the instruction */
	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);

	if (insn.word == KVM_INVALID_INST)
		return KVM_INVALID_INST;

	switch (insn.i_format.opcode) {
		/* jr and jalr are in r_format format. */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		case bposge32_op:
			if (!cpu_has_dsp)
				goto sigill;

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		}
		break;

		/* These are unconditional and in j_format. */
	case jal_op:
		arch->gprs[31] = instpc + 8;
	case j_op:
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

		/* These are conditional and in i_format. */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:		/* not really i_format */
	case blezl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:
	case bgtzl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

		/* And now the FPA/cp1 branch instructions. */
	case cop1_op:
		kvm_err("%s: unsupported cop1_op\n", __func__);
		break;
	}

	return nextpc;

unaligned:
	kvm_err("%s: unaligned epc\n", __func__);
	return nextpc;

sigill:
	kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
	return nextpc;
}
enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
{
	unsigned long branch_pc;
	enum emulation_result er = EMULATE_DONE;

	if (cause & CAUSEF_BD) {
		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
		if (branch_pc == KVM_INVALID_INST) {
			er = EMULATE_FAIL;
		} else {
			vcpu->arch.pc = branch_pc;
			kvm_debug("BD update_pc(): New PC: %#lx\n",
				  vcpu->arch.pc);
		}
	} else
		vcpu->arch.pc += 4;

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return er;
}
/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case CP0_Count timer is running).
 */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}
/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to scale.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
	s64 now_ns, periods;
	u64 delta;

	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.count_dyn_bias;

	if (delta >= vcpu->arch.count_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.count_period);
		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.count_dyn_bias;
	}

	/*
	 * We've ensured that:
	 *   delta < count_period
	 *
	 * Therefore the intermediate delta*count_hz will never overflow since
	 * at the boundary condition:
	 *   delta = count_period
	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
	 *   delta * count_hz = NSEC_PER_SEC * 2^32
	 */
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}
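
/*
 * Example with illustrative numbers: at the default count_hz of 100 MHz,
 * count_period = 2^32 * NSEC_PER_SEC / count_hz, i.e. one CP0_Count tick
 * per 10ns of biased monotonic time. Keeping delta < count_period bounds
 * the intermediate product at delta * count_hz < NSEC_PER_SEC * 2^32,
 * which fits comfortably in the u64 passed to div_u64() above.
 */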
/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:	Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
 * except when the master disable bit is set in count_ctl, in which case it is
 * count_resume, i.e. the time that the count was disabled.
 *
 * Returns:	Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		return vcpu->arch.count_resume;

	return ktime_get();
}
/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now and handles if the
 * timer interrupt is pending and hasn't been handled yet.
 *
 * Returns:	The current value of the guest CP0_Count register.
 */
static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t expires, threshold;
	uint32_t count, compare;
	int running;

	/* Calculate the biased and scaled guest CP0_Count */
	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
	compare = kvm_read_c0_guest_compare(cop0);

	/*
	 * Find whether CP0_Count has reached the closest timer interrupt. If
	 * not, we shouldn't inject it.
	 */
	if ((int32_t)(count - compare) < 0)
		return count;

	/*
	 * The CP0_Count we're going to return has already reached the closest
	 * timer interrupt. Quickly check if it really is a new interrupt by
	 * looking at whether the interval until the hrtimer expiry time is
	 * less than 1/4 of the timer period.
	 */
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
	if (ktime_before(expires, threshold)) {
		/*
		 * Cancel it while we handle it so there's no chance of
		 * interference with the timeout handler.
		 */
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

		/* Nothing should be waiting on the timeout */
		kvm_mips_callbacks->queue_timer_int(vcpu);

		/*
		 * Restart the timer if it was running based on the expiry time
		 * we read, so that we don't push it back 2 periods.
		 */
		if (running) {
			expires = ktime_add_ns(expires,
					       vcpu->arch.count_period);
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
				      HRTIMER_MODE_ABS);
		}
	}

	return count;
}
/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the
 * timer is stopped.
 *
 * Returns:	The current guest CP0_Count value.
 */
uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* If count disabled just read static copy of count */
	if (kvm_mips_count_disabled(vcpu))
		return kvm_read_c0_guest_count(cop0);

	return kvm_mips_read_count_running(vcpu, ktime_get());
}
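
/*
 * Typical caller usage (sketch; mirrors the MFC0 handling in
 * kvm_mips_emulate_CP0() below):
 *
 *	vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
 *
 * Callers need not check CP0_Cause.DC or count_ctl.DC themselves; the
 * disabled case falls back to the static CP0_Count copy above.
 */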
/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The ktime at the point of freeze.
 */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
				       uint32_t *count)
{
	ktime_t now;

	/* stop hrtimer before finding time */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	now = ktime_get();

	/* find count at this point and handle pending hrtimer */
	*count = kvm_mips_read_count_running(vcpu, now);

	return now;
}
/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at point of resume.
 * @count:	CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_timer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_timer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, uint32_t count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t compare;
	u64 delta;
	ktime_t expire;

	/* Calculate timeout (wrap 0 to 2^32) */
	compare = kvm_read_c0_guest_compare(cop0);
	delta = (u64)(uint32_t)(compare - count - 1) + 1;
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}
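
/*
 * The "(wrap 0 to 2^32)" computation above maps compare == count to a
 * full CP0_Count period rather than zero: for example (illustrative
 * values), with compare == count, (u64)(uint32_t)(compare - count - 1)
 * + 1 == 0xffffffff + 1 == 2^32 ticks, while compare == count + 1
 * yields a delta of exactly one tick.
 */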
/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t now;

	/* Calculate bias */
	now = kvm_mips_count_time(vcpu);
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	if (kvm_mips_count_disabled(vcpu))
		/* The timer's disabled, adjust the static count */
		kvm_write_c0_guest_count(cop0, count);
	else
		/* Update timeout */
		kvm_mips_resume_hrtimer(vcpu, now, count);
}
/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and
 * set it going if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
{
	/* 100 MHz */
	vcpu->arch.count_hz = 100*1000*1000;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
					  vcpu->arch.count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Starting at count 0 */
	kvm_mips_write_count(vcpu, 0);
}
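
/*
 * At the default 100 MHz this gives count_period = (NSEC_PER_SEC << 32)
 * / 100000000 ns, roughly 42.95 seconds: the time taken for the 32-bit
 * CP0_Count to wrap once.
 */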
/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:	-EINVAL if @count_hz is out of range.
 *		0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	ktime_t now;
	u32 count;

	/* ensure the frequency is in a sensible range... */
	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
		return -EINVAL;
	/* ... and has actually changed */
	if (vcpu->arch.count_hz == count_hz)
		return 0;

	/* Safely freeze timer so we can keep it continuous */
	dc = kvm_mips_count_disabled(vcpu);
	if (dc) {
		now = kvm_mips_count_time(vcpu);
		count = kvm_read_c0_guest_count(cop0);
	} else {
		now = kvm_mips_freeze_hrtimer(vcpu, &count);
	}

	/* Update the frequency */
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Calculate adjusted bias so dynamic count is unchanged */
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	/* Update and resume hrtimer */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
	return 0;
}
/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 * @ack:	Whether to acknowledge timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout.
 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
 * any pending timer interrupt is preserved.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	u32 old_compare = kvm_read_c0_guest_compare(cop0);
	ktime_t now;
	uint32_t count;

	/* if unchanged, must just be an ack */
	if (old_compare == compare) {
		if (!ack)
			return;
		kvm_mips_callbacks->dequeue_timer_int(vcpu);
		kvm_write_c0_guest_compare(cop0, compare);
		return;
	}

	/* freeze_hrtimer() takes care of timer interrupts <= count */
	dc = kvm_mips_count_disabled(vcpu);
	if (!dc)
		now = kvm_mips_freeze_hrtimer(vcpu, &count);

	if (ack)
		kvm_mips_callbacks->dequeue_timer_int(vcpu);

	kvm_write_c0_guest_compare(cop0, compare);

	/* resume_hrtimer() takes care of timer interrupts > count */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
}
/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:	The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t count;
	ktime_t now;

	/* Stop hrtimer */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	/* Set the static count from the dynamic count, handling pending TI */
	now = ktime_get();
	count = kvm_mips_read_count_running(vcpu, now);
	kvm_write_c0_guest_count(cop0, count);

	return now;
}
/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		kvm_mips_count_disable(vcpu);
}
/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t count;

	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

	/*
	 * Set the dynamic count to match the static count.
	 * This starts the hrtimer if count_ctl.DC allows it.
	 * Otherwise it conveniently updates the biases.
	 */
	count = kvm_read_c0_guest_count(cop0);
	kvm_mips_write_count(vcpu, count);
}
/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:	Virtual CPU.
 * @count_ctl:	Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:	-EINVAL if reserved bits are set.
 *		0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
	s64 delta;
	ktime_t expire, now;
	uint32_t count, compare;

	/* Only allow defined bits to be changed */
	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
		return -EINVAL;

	/* Apply new value */
	vcpu->arch.count_ctl = count_ctl;

	/* Master CP0_Count disable */
	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
		/* Is CP0_Cause.DC already disabling CP0_Count? */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
				/* Just record the current time */
				vcpu->arch.count_resume = ktime_get();
		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
			/* disable timer and record current time */
			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
		} else {
			/*
			 * Calculate timeout relative to static count at resume
			 * time (wrap 0 to 2^32).
			 */
			count = kvm_read_c0_guest_count(cop0);
			compare = kvm_read_c0_guest_compare(cop0);
			delta = (u64)(uint32_t)(compare - count - 1) + 1;
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
			expire = ktime_add_ns(vcpu->arch.count_resume, delta);

			/* Handle pending interrupt */
			now = ktime_get();
			if (ktime_compare(now, expire) >= 0)
				/* Nothing should be waiting on the timeout */
				kvm_mips_callbacks->queue_timer_int(vcpu);

			/* Resume hrtimer without changing bias */
			count = kvm_mips_read_count_running(vcpu, now);
			kvm_mips_resume_hrtimer(vcpu, now, count);
		}
	}

	return 0;
}
/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:		Virtual CPU.
 * @count_resume:	Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:	-EINVAL if out of valid range (0..now).
 *		0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
	/*
	 * It doesn't make sense for the resume time to be in the future, as it
	 * would be possible for the next interrupt to be more than a full
	 * period in the future.
	 */
	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
		return -EINVAL;

	vcpu->arch.count_resume = ns_to_ktime(count_resume);
	return 0;
}
/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 *
 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
	/* Add the Count period to the current expiry time */
	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
			       vcpu->arch.count_period);
	return HRTIMER_RESTART;
}
enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else {
		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
			vcpu->arch.pc);
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, WAIT_EXITS);
	if (!vcpu->arch.pending_exceptions) {
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable, then definitely go off to user space to
		 * check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return EMULATE_DONE;
}
/*
 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
 * we can catch this, if things ever change
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t pc = vcpu->arch.pc;

	kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
	return EMULATE_FAIL;
}
/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		kvm_debug("%s: illegal index: %d\n", __func__, index);
		kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
			  pc, index, kvm_read_c0_guest_entryhi(cop0),
			  kvm_read_c0_guest_entrylo0(cop0),
			  kvm_read_c0_guest_entrylo1(cop0),
			  kvm_read_c0_guest_pagemask(cop0));
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
	}

	tlb = &vcpu->arch.guest_tlb[index];
	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0),
		  kvm_read_c0_guest_pagemask(cop0));

	return EMULATE_DONE;
}
/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;
	int index;

	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);

	tlb = &vcpu->arch.guest_tlb[index];

	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0));

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	long entryhi = kvm_read_c0_guest_entryhi(cop0);
	uint32_t pc = vcpu->arch.pc;
	int index;

	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

	kvm_write_c0_guest_index(cop0, index);

	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
		  index);

	return EMULATE_DONE;
}
/**
 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config1 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}
/**
 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config3 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config4 is optional */
	unsigned int mask = MIPS_CONF_M;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}
/**
 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config4 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config5 is optional */
	return MIPS_CONF_M;
}
/**
 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config5 CP0
 * register, by the guest itself.
 */
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE;
		/* We don't support UFR or UFE */
	}

	return mask;
}
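
/*
 * A guest MTC0 to Config5 is filtered through this mask before being
 * committed, e.g. (sketch of the Config5 case in kvm_mips_emulate_CP0()
 * below):
 *
 *	wrmask = kvm_mips_config5_wrmask(vcpu);
 *	change = (val ^ old_val) & wrmask;
 *	val = old_val ^ change;
 *
 * so bits outside the mask keep their old values.
 */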
enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
					   uint32_t cause, struct kvm_run *run,
					   struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	int32_t rt, rd, copz, sel, co_bit, op;
	uint32_t pc = vcpu->arch.pc;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	copz = (inst >> 21) & 0x1f;
	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;
	co_bit = (inst >> 25) & 1;

	if (co_bit) {
		op = (inst) & 0xff;

		switch (op) {
		case tlbr_op:	/* Read indexed TLB entry */
			er = kvm_mips_emul_tlbr(vcpu);
			break;
		case tlbwi_op:	/* Write indexed */
			er = kvm_mips_emul_tlbwi(vcpu);
			break;
		case tlbwr_op:	/* Write random */
			er = kvm_mips_emul_tlbwr(vcpu);
			break;
		case tlbp_op:	/* TLB Probe */
			er = kvm_mips_emul_tlbp(vcpu);
			break;
		case rfe_op:
			kvm_err("!!!COP0_RFE!!!\n");
			break;
		case eret_op:
			er = kvm_mips_emul_eret(vcpu);
			goto dont_update_pc;
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		}
	} else {
		switch (copz) {
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			/* Get reg */
			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
				vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			} else {
				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			}

			kvm_debug
			    ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
			     pc, rd, sel, rt, vcpu->arch.gprs[rt]);

			break;

		case dmfc_op:
			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
			break;

		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if ((rd == MIPS_CP0_TLB_INDEX)
			    && (vcpu->arch.gprs[rt] >=
				KVM_MIPS_GUEST_TLB_SIZE)) {
				kvm_err("Invalid TLB Index: %ld",
					vcpu->arch.gprs[rt]);
				er = EMULATE_FAIL;
				break;
			}
#define C0_EBASE_CORE_MASK 0xff
			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
				/* Preserve CORE number */
				kvm_change_c0_guest_ebase(cop0,
							  ~(C0_EBASE_CORE_MASK),
							  vcpu->arch.gprs[rt]);
				kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
					kvm_read_c0_guest_ebase(cop0));
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				uint32_t nasid =
					vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
				    ((kvm_read_c0_guest_entryhi(cop0) &
				      KVM_ENTRYHI_ASID) != nasid)) {
					kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
						kvm_read_c0_guest_entryhi(cop0)
						& KVM_ENTRYHI_ASID,
						vcpu->arch.gprs[rt]
						& KVM_ENTRYHI_ASID);

					/* Blow away the shadow host TLBs */
					kvm_mips_flush_host_tlb(1);
				}
				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
			}
			/* Are we writing to COUNT */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
				goto done;
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
					  pc, kvm_read_c0_guest_compare(cop0),
					  vcpu->arch.gprs[rt]);

				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
						       true);
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				unsigned int old_val, val, change;

				old_val = kvm_read_c0_guest_status(cop0);
				val = vcpu->arch.gprs[rt];
				change = val ^ old_val;

				/* Make sure that the NMI bit is never set */
				val &= ~ST0_NMI;

				/*
				 * Don't allow CU1 or FR to be set unless FPU
				 * capability enabled and exists in guest
				 * configuration.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					val &= ~(ST0_CU1 | ST0_FR);

				/*
				 * Also don't allow FR to be set if host doesn't
				 * support it.
				 */
				if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
					val &= ~ST0_FR;

				/* Handle changes in FPU mode */
				preempt_disable();

				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				if (change & ST0_FR)
					kvm_drop_fpu(vcpu);

				/*
				 * If MSA state is already live, it is undefined
				 * how it interacts with FR=0 FPU state, and we
				 * don't want to hit reserved instruction
				 * exceptions trying to save the MSA state later
				 * when CU=1 && FR=1, so play it safe and save
				 * it first.
				 */
				if (change & ST0_CU1 && !(val & ST0_FR) &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
					kvm_lose_fpu(vcpu);

				/*
				 * Propagate CU1 (FPU enable) changes
				 * immediately if the FPU context is already
				 * loaded. When disabling we leave the context
				 * loaded so it can be quickly enabled again in
				 * the near future.
				 */
				if (change & ST0_CU1 &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
					change_c0_status(ST0_CU1, val);

				preempt_enable();

				kvm_write_c0_guest_status(cop0, val);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				/*
				 * If FPU present, we need CU1/FR bits to take
				 * effect fairly soon.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
				unsigned int old_val, val, change, wrmask;

				old_val = kvm_read_c0_guest_config5(cop0);
				val = vcpu->arch.gprs[rt];

				/* Only a few bits are writable in Config5 */
				wrmask = kvm_mips_config5_wrmask(vcpu);
				change = (val ^ old_val) & wrmask;
				val = old_val ^ change;

				/* Handle changes in FPU/MSA modes */
				preempt_disable();

				/*
				 * Propagate FRE changes immediately if the FPU
				 * context is already loaded.
				 */
				if (change & MIPS_CONF5_FRE &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
					change_c0_config5(MIPS_CONF5_FRE, val);

				/*
				 * Propagate MSAEn changes immediately if the
				 * MSA context is already loaded. When disabling
				 * we leave the context loaded so it can be
				 * quickly enabled again in the near future.
				 */
				if (change & MIPS_CONF5_MSAEN &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
					change_c0_config5(MIPS_CONF5_MSAEN,
							  val);

				preempt_enable();

				kvm_write_c0_guest_config5(cop0, val);
			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
				uint32_t old_cause, new_cause;

				old_cause = kvm_read_c0_guest_cause(cop0);
				new_cause = vcpu->arch.gprs[rt];
				/* Update R/W bits */
				kvm_change_c0_guest_cause(cop0, 0x08800300,
							  new_cause);
				/* DC bit enabling/disabling timer? */
				if ((old_cause ^ new_cause) & CAUSEF_DC) {
					if (new_cause & CAUSEF_DC)
						kvm_mips_count_disable_cause(vcpu);
					else
						kvm_mips_count_enable_cause(vcpu);
				}
			} else {
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			}

			kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
				  rd, sel, cop0->reg[rd][sel]);
			break;

		case dmtc_op:
			kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
				vcpu->arch.pc, rt, rd, sel);
			er = EMULATE_FAIL;
			break;

		case mfmc0_op:
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
			if (rt != 0)
				vcpu->arch.gprs[rt] =
					kvm_read_c0_guest_status(cop0);
			/* EI */
			if (inst & 0x20) {
				kvm_debug("[%#lx] mfmc0_op: EI\n",
					  vcpu->arch.pc);
				kvm_set_c0_guest_status(cop0, ST0_IE);
			} else {
				kvm_debug("[%#lx] mfmc0_op: DI\n",
					  vcpu->arch.pc);
				kvm_clear_c0_guest_status(cop0, ST0_IE);
			}

			break;

		case wrpgpr_op:
			{
				uint32_t css =
				    cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
				uint32_t pss =
				    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
				/*
				 * We don't support any shadow register sets, so
				 * SRSCtl[PSS] == SRSCtl[CSS] = 0
				 */
				if (css || pss) {
					er = EMULATE_FAIL;
					break;
				}
				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
					  vcpu->arch.gprs[rt]);
				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
			}
			break;
		default:
			kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
				vcpu->arch.pc, copz);
			er = EMULATE_FAIL;
			break;
		}
	}

done:
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

dont_update_pc:
	/*
	 * This is for special instructions whose emulation
	 * updates the PC, so do not overwrite the PC under
	 * any circumstances
	 */

	return er;
}
enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;
	void *data = run->mmio.data;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

	switch (op) {
	case sb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u8 *) data = vcpu->arch.gprs[rt];
		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
			  *(u8 *) data);

		break;

	case sw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint32_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint32_t *) data);
		break;

	case sh_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint16_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint16_t *) data);
		break;

	default:
		kvm_err("Store not yet supported");
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}
enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

	vcpu->arch.pending_load_cause = cause;
	vcpu->arch.io_gpr = rt;

	switch (op) {
	case lw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		break;

	case lh_op:
	case lhu_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;

		if (op == lh_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	case lbu_op:
	case lb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lb_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	default:
		kvm_err("Load not yet supported");
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
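
/*
 * Note that vcpu->mmio_needed doubles as a sign-extension flag for the
 * narrow loads above: 2 means sign-extend (lh, lb) and 1 means
 * zero-extend (lhu, lbu). kvm_mips_complete_mmio_load() picks the
 * matching cast once userland finishes the MMIO access.
 */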
int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
{
	unsigned long offset = (va & ~PAGE_MASK);
	struct kvm *kvm = vcpu->kvm;
	unsigned long pa;
	gfn_t gfn;
	pfn_t pfn;

	gfn = va >> PAGE_SHIFT;

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		return -1;
	}
	pfn = kvm->arch.guest_pmap[gfn];
	pa = (pfn << PAGE_SHIFT) | offset;

	kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
		  CKSEG0ADDR(pa));

	local_flush_icache_range(CKSEG0ADDR(pa), 32);
	return 0;
}
enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
					     uint32_t cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	int32_t offset, cache, op_inst, op, base;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = (inst >> 21) & 0x1f;
	op_inst = (inst >> 16) & 0x1f;
	offset = (int16_t)inst;
	cache = op_inst & CacheOp_Cache;
	op = op_inst & CacheOp_Op;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/*
	 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
	 * invalidate the caches entirely by stepping through all the
	 * ways/indexes
	 */
	if (op == Index_Writeback_Inv) {
		kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
			  arch->gprs[base], offset);

		if (cache == Cache_D)
			r4k_blast_dcache();
		else if (cache == Cache_I)
			r4k_blast_icache();
		else {
			kvm_err("%s: unsupported CACHE INDEX operation\n",
				__func__);
			return EMULATE_FAIL;
		}

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
		goto done;
	}

	preempt_disable();
	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		int index;

		/* If an entry already exists then skip */
		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
			goto skip_fault;

		/*
		 * If address not in the guest TLB, then give the guest a fault,
		 * the resulting handler will do the right thing
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
						  (kvm_read_c0_guest_entryhi
						   (cop0) & KVM_ENTRYHI_ASID));

		if (index < 0) {
			vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
			vcpu->arch.host_cp0_badvaddr = va;
			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
							 vcpu);
			preempt_enable();
			goto dont_update_pc;
		} else {
			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
			/*
			 * Check if the entry is valid, if not then setup a TLB
			 * invalid exception to the guest
			 */
			if (!TLB_IS_VALID(*tlb, va)) {
				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
								run, vcpu);
				preempt_enable();
				goto dont_update_pc;
			} else {
				/*
				 * We fault an entry from the guest tlb to the
				 * shadow host TLB
				 */
				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
								     NULL,
								     NULL);
			}
		}
	} else {
		kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto dont_update_pc;
	}

skip_fault:
	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
	if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
		flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/*
		 * Replace the CACHE instruction, with a SYNCI, not the same,
		 * but avoids a trap
		 */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else if (op_inst == Hit_Invalidate_I) {
		flush_dcache_line(va);
		flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction, with a SYNCI */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else {
		kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
	}

	preempt_enable();
done:
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

dont_update_pc:
	/*
	 * This is for special instructions whose emulation updates the PC, so
	 * do not overwrite the PC under any circumstances
	 */
	return er;
}
enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t inst;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst = kvm_get_inst(opc, vcpu);

	switch (((union mips_instruction)inst).r_format.opcode) {
	case cop0_op:
		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
		break;
	case sb_op:
	case sh_op:
	case sw_op:
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		break;
	case lb_op:
	case lbu_op:
	case lhu_op:
	case lh_op:
	case lw_op:
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		break;
	case cache_op:
		++vcpu->stat.cache_exits;
		trace_kvm_exit(vcpu, CACHE_EXITS);
		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
		break;
	default:
		kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
			inst);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
					       uint32_t *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_SYS << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
						  uint32_t *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;

	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBL << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
						 uint32_t *opc,
						 struct kvm_run *run,
						 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi =
		(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
		(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
			  arch->pc);

		/* set pc to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBL << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
						  uint32_t *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x0;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
						 uint32_t *opc,
						 struct kvm_run *run,
						 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_TLBS << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}
/* TLBMOD: store into address matching TLB with Dirty bit off */
enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
#ifdef DEBUG
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
	int index;

	/* If address not in the guest TLB, then we are in trouble */
	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
	if (index < 0) {
		/* XXXKYMA Invalidate and retry */
		kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
		kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
			__func__, entryhi);
		kvm_mips_dump_guest_tlbs(vcpu);
		kvm_mips_dump_host_tlbs();
		return EMULATE_FAIL;
	}
#endif

	er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
	return er;
}
enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
					      uint32_t *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
	struct kvm_vcpu_arch *arch = &vcpu->arch;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);

		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	} else {
		kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
			  arch->pc);
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_MOD << CAUSEB_EXCCODE));

	/* setup badvaddr, context and entryhi registers for the guest */
	kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
	/* XXXKYMA: is the context register used by linux??? */
	kvm_write_c0_guest_entryhi(cop0, entryhi);
	/* Blow away the shadow host TLBs */
	kvm_mips_flush_host_tlb(1);

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
					       uint32_t *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
	}

	arch->pc = KVM_GUEST_KSEG0 + 0x180;

	kvm_change_c0_guest_cause(cop0, (0xff),
				  (EXCCODE_CPU << CAUSEB_EXCCODE));
	kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));

	return EMULATE_DONE;
}
enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
					      uint32_t *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_RI << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver RI when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
					      uint32_t *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_BP << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver BP when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
						uint32_t *opc,
						struct kvm_run *run,
						struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_TR << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver TRAP when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
						  uint32_t *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_MSAFPE << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
					       uint32_t *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_FPE << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver FPE when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
						  uint32_t *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_MSADIS << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver MSADIS when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
/* ll/sc, rdhwr, sync emulation */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b
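/*
 * Worked example (added for illustration): the masks above slice a fixed
 * 32-bit MIPS instruction word into its fields.  The conventional TLS
 * read "rdhwr $3, $29" encodes as
 *
 *	0x7c03e83b == SPEC3 | (3 << 16) | (29 << 11) | RDHWR
 *
 * so (inst & OPCODE) == SPEC3, (inst & FUNC) == RDHWR,
 * (inst & RT) >> 16 == 3 (the destination GPR) and
 * (inst & RD) >> 11 == 29 (hardware register UserLocal), which is
 * exactly the decode performed by kvm_mips_handle_ri() below.
 */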
enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
					 struct kvm_run *run,
					 struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;
	unsigned long curr_pc;
	uint32_t inst;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;

	inst = kvm_get_inst(opc, vcpu);

	if (inst == KVM_INVALID_INST) {
		kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
		return EMULATE_FAIL;
	}

	if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
		int rd = (inst & RD) >> 11;
		int rt = (inst & RT) >> 16;

		/* If usermode, check RDHWR rd is allowed by guest HWREna */
		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
				  rd, opc);
			goto emulate_ri;
		}
		switch (rd) {
		case 0:	/* CPU number */
			arch->gprs[rt] = 0;
			break;
		case 1:	/* SYNCI length */
			arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
					     current_cpu_data.icache.linesz);
			break;
		case 2:	/* Read count register */
			arch->gprs[rt] = kvm_mips_read_count(vcpu);
			break;
		case 3:	/* Count register resolution */
			switch (current_cpu_data.cputype) {
			case CPU_20KC:
			case CPU_25KF:
				arch->gprs[rt] = 1;
				break;
			default:
				arch->gprs[rt] = 2;
			}
			break;
		case 29:
			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
			break;

		default:
			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
			goto emulate_ri;
		}
	} else {
		kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
		goto emulate_ri;
	}

	return EMULATE_DONE;

emulate_ri:
	/*
	 * Rollback PC (if in branch delay slot then the PC already points to
	 * branch target), and pass the RI exception to the guest OS.
	 */
	vcpu->arch.pc = curr_pc;
	return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
}
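/*
 * Design note (added commentary): kvm_mips_handle_ri() advances the PC
 * via update_pc() *before* decoding, because update_pc() already knows
 * how to step over a branch delay slot.  If the instruction turns out
 * not to be an emulatable RDHWR, restoring curr_pc before calling
 * kvm_mips_emulate_ri_exc() ensures the guest's RI handler sees the
 * original EPC rather than the already-advanced one.
 */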
enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
						  struct kvm_run *run)
{
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;

	if (run->mmio.len > sizeof(*gpr)) {
		kvm_err("Bad MMIO length: %d", run->mmio.len);
		er = EMULATE_FAIL;
		goto done;
	}

	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
	if (er == EMULATE_FAIL)
		return er;

	switch (run->mmio.len) {
	case 4:
		*gpr = *(int32_t *) run->mmio.data;
		break;

	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(int16_t *) run->mmio.data;
		else
			*gpr = *(uint16_t *)run->mmio.data;
		break;

	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(int8_t *) run->mmio.data;
		else
			*gpr = *(u8 *) run->mmio.data;
		break;
	}

	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
		kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
			  vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
			  vcpu->mmio_needed);

done:
	return er;
}
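/*
 * Illustrative example (added, not from the original source):
 * mmio_needed == 2 marks a signed narrow load, so the MMIO data is cast
 * through a signed type to get sign extension into the 32-bit guest GPR.
 * Completing a 2-byte MMIO read that returned 0x8000:
 *
 *	LH  (mmio_needed == 2): *gpr = *(int16_t *)data  -> 0xffff8000
 *	LHU (otherwise):        *gpr = *(uint16_t *)data -> 0x00008000
 */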
static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
						  uint32_t *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (exccode << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
		kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);

		kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
			  exccode, kvm_read_c0_guest_epc(cop0),
			  kvm_read_c0_guest_badvaddr(cop0));
	} else {
		kvm_err("Trying to deliver EXC when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}
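/*
 * Added note: unlike the fixed-ExcCode helpers earlier in this file,
 * kvm_mips_emulate_exc() forwards whatever exception code was extracted
 * from the host Cause register, and additionally copies the host
 * BadVAddr into the guest CP0, since the codes it is used for from
 * kvm_mips_check_privilege() below are all address-related.
 */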
enum emulation_result kvm_mips_check_privilege(unsigned long cause,
					       uint32_t *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;

	int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);

	if (usermode) {
		switch (exccode) {
		case EXCCODE_INT:
		case EXCCODE_SYS:
		case EXCCODE_BP:
		case EXCCODE_RI:
		case EXCCODE_TR:
		case EXCCODE_MSAFPE:
		case EXCCODE_FPE:
		case EXCCODE_MSADIS:
			break;

		case EXCCODE_CPU:
			if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
				er = EMULATE_PRIV_FAIL;
			break;

		case EXCCODE_MOD:
			break;

		case EXCCODE_TLBL:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: LD MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case EXCCODE_TLBS:
			/*
			 * If we are accessing Guest kernel space, then send an
			 * address error exception to the guest
			 */
			if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
				kvm_debug("%s: ST MISS @ %#lx\n", __func__,
					  badvaddr);
				cause &= ~0xff;
				cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
				er = EMULATE_PRIV_FAIL;
			}
			break;

		case EXCCODE_ADES:
			kvm_debug("%s: address error ST @ %#lx\n", __func__,
				  badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		case EXCCODE_ADEL:
			kvm_debug("%s: address error LD @ %#lx\n", __func__,
				  badvaddr);
			if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
				cause &= ~0xff;
				cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
			}
			er = EMULATE_PRIV_FAIL;
			break;
		default:
			er = EMULATE_PRIV_FAIL;
			break;
		}
	}

	if (er == EMULATE_PRIV_FAIL)
		kvm_mips_emulate_exc(cause, opc, run, vcpu);

	return er;
}
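/*
 * Worked example (added for illustration): if guest-user code loads from
 * an address at or above KVM_GUEST_KSEG0, the host reports a TLBL fault,
 * but a real MIPS CPU would never let user mode touch kseg0, so
 * kvm_mips_check_privilege() rewrites the low byte of 'cause' to
 * EXCCODE_ADEL and delivers an address error to the guest instead.
 */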
/*
 * User Address (UA) fault, this could happen if
 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
 *     case we pass on the fault to the guest kernel and let it handle it.
 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
 *     case we inject the TLB from the Guest TLB into the shadow host TLB
 */
enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
					      uint32_t *opc,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	unsigned long va = vcpu->arch.host_cp0_badvaddr;
	int index;

	kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
		  vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);

	/*
	 * KVM would not have got the exception if this entry was valid in the
	 * shadow host TLB. Check the Guest TLB, if the entry is not there then
	 * send the guest an exception. The guest exc handler should then inject
	 * an entry into the guest TLB.
	 */
	index = kvm_mips_guest_tlb_lookup(vcpu,
		      (va & VPN2_MASK) |
		      (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
		       KVM_ENTRYHI_ASID));
	if (index < 0) {
		if (exccode == EXCCODE_TLBL) {
			er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
		} else if (exccode == EXCCODE_TLBS) {
			er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
		} else {
			kvm_err("%s: invalid exc code: %d\n", __func__,
				exccode);
			er = EMULATE_FAIL;
		}
	} else {
		struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];

		/*
		 * Check if the entry is valid, if not then setup a TLB invalid
		 * exception to the guest
		 */
		if (!TLB_IS_VALID(*tlb, va)) {
			if (exccode == EXCCODE_TLBL) {
				er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
								vcpu);
			} else if (exccode == EXCCODE_TLBS) {
				er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
								vcpu);
			} else {
				kvm_err("%s: invalid exc code: %d\n", __func__,
					exccode);
				er = EMULATE_FAIL;
			}
		} else {
			kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
				  tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
			/*
			 * OK we have a Guest TLB entry, now inject it into the
			 * shadow host TLB
			 */
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,