arch/mips/kvm/emulate.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

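/*
 * Temporarily hide CONFIG_MIPS_MT while including r4kcache.h so we pick up
 * the plain (non-MT) cache blast helpers; the symbol is restored below.
 */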
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"
#include "commpage.h"

#include "trace.h"

/*
 * Compute the return address and, if required, emulate the branch.
 * This function should only be called when the faulting instruction
 * is in a branch delay slot.
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
				     unsigned long instpc)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc = KVM_INVALID_INST;

	if (epc & 3)
		goto unaligned;

	/* Read the instruction */
	insn.word = kvm_get_inst((u32 *) epc, vcpu);

	if (insn.word == KVM_INVALID_INST)
		return KVM_INVALID_INST;

	switch (insn.i_format.opcode) {
		/* jr and jalr are in r_format format. */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		case bposge32_op:
			if (!cpu_has_dsp)
				goto sigill;

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		}
		break;

		/* These are unconditional and in j_format. */
	case jal_op:
		arch->gprs[31] = instpc + 8;
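		/* Fall through */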
	case j_op:
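		/* Target = top 4 bits of the delay slot PC (epc + 4) | (26-bit index << 2) */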
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

		/* These are conditional and in i_format. */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:	/* POP06 */
#ifndef CONFIG_CPU_MIPSR6
	case blezl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:	/* POP07 */
#ifndef CONFIG_CPU_MIPSR6
	case bgtzl_op:	/* removed in R6 */
#endif
		if (insn.i_format.rt != 0)
			goto compact_branch;
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

		/* And now the FPA/cp1 branch instructions. */
	case cop1_op:
		kvm_err("%s: unsupported cop1_op\n", __func__);
		break;

#ifdef CONFIG_CPU_MIPSR6
	/* R6 added the following compact branches with forbidden slots */
	case blezl_op:	/* POP26 */
	case bgtzl_op:	/* POP27 */
		/* only rt == 0 isn't compact branch */
		if (insn.i_format.rt != 0)
			goto compact_branch;
		break;
	case pop10_op:
	case pop30_op:
		/* only rs == rt == 0 is reserved, rest are compact branches */
		if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
			goto compact_branch;
		break;
	case pop66_op:
	case pop76_op:
		/* only rs == 0 isn't compact branch */
		if (insn.i_format.rs != 0)
			goto compact_branch;
		break;
compact_branch:
		/*
		 * If we've hit an exception on the forbidden slot, then
		 * the branch must not have been taken.
		 */
		epc += 8;
		nextpc = epc;
		break;
#else
compact_branch:
		/* Compact branches not supported before R6 */
		break;
#endif
	}

	return nextpc;

unaligned:
	kvm_err("%s: unaligned epc\n", __func__);
	return nextpc;

sigill:
	kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
	return nextpc;
}

enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
	unsigned long branch_pc;
	enum emulation_result er = EMULATE_DONE;

	if (cause & CAUSEF_BD) {
		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
		if (branch_pc == KVM_INVALID_INST) {
			er = EMULATE_FAIL;
		} else {
			vcpu->arch.pc = branch_pc;
			kvm_debug("BD update_pc(): New PC: %#lx\n",
				  vcpu->arch.pc);
		}
	} else
		vcpu->arch.pc += 4;

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return er;
}

/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case CP0_Count timer is running).
 */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}

/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
	s64 now_ns, periods;
	u64 delta;

	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.count_dyn_bias;

	if (delta >= vcpu->arch.count_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.count_period);
		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.count_dyn_bias;
	}

	/*
	 * We've ensured that:
	 * delta < count_period
	 *
	 * Therefore the intermediate delta*count_hz will never overflow since
	 * at the boundary condition:
	 * delta = count_period
	 * delta = NSEC_PER_SEC * 2^32 / count_hz
	 * delta * count_hz = NSEC_PER_SEC * 2^32
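	 *
	 * For example, with the default 100 MHz count_hz set up by
	 * kvm_mips_init_count(), count_period = 10^9 * 2^32 / 10^8 ns
	 * (about 42.9 seconds), so delta * count_hz < 10^9 * 2^32 < 2^62,
	 * which fits comfortably in a u64.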
	 */
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}

/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:	Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
 * except when the master disable bit is set in count_ctl, in which case it is
 * count_resume, i.e. the time that the count was disabled.
 *
 * Returns:	Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		return vcpu->arch.count_resume;

	return ktime_get();
}

/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now and handles if the
 * timer interrupt is pending and hasn't been handled yet.
 *
 * Returns:	The current value of the guest CP0_Count register.
 */
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t expires, threshold;
	u32 count, compare;
	int running;

	/* Calculate the biased and scaled guest CP0_Count */
	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
	compare = kvm_read_c0_guest_compare(cop0);

	/*
	 * Find whether CP0_Count has reached the closest timer interrupt. If
	 * not, we shouldn't inject it.
	 */
	if ((s32)(count - compare) < 0)
		return count;

	/*
	 * The CP0_Count we're going to return has already reached the closest
	 * timer interrupt. Quickly check if it really is a new interrupt by
	 * looking at whether the interval until the hrtimer expiry time is
	 * less than 1/4 of the timer period.
	 */
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
	if (ktime_before(expires, threshold)) {
		/*
		 * Cancel it while we handle it so there's no chance of
		 * interference with the timeout handler.
		 */
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

		/* Nothing should be waiting on the timeout */
		kvm_mips_callbacks->queue_timer_int(vcpu);

		/*
		 * Restart the timer if it was running based on the expiry time
		 * we read, so that we don't push it back 2 periods.
		 */
		if (running) {
			expires = ktime_add_ns(expires,
					       vcpu->arch.count_period);
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
				      HRTIMER_MODE_ABS);
		}
	}

	return count;
}

/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the timer
 * is stopped.
 *
 * Returns:	The current guest CP0_Count value.
 */
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* If count disabled just read static copy of count */
	if (kvm_mips_count_disabled(vcpu))
		return kvm_read_c0_guest_count(cop0);

	return kvm_mips_read_count_running(vcpu, ktime_get());
}

/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The ktime at the point of freeze.
 */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
	ktime_t now;

	/* stop hrtimer before finding time */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	now = ktime_get();

	/* find count at this point and handle pending hrtimer */
	*count = kvm_mips_read_count_running(vcpu, now);

	return now;
}

/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at point of resume.
 * @count:	CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, u32 count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 compare;
	u64 delta;
	ktime_t expire;

	/* Calculate timeout (wrap 0 to 2^32) */
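	/*
	 * (compare - count - 1) is truncated to 32 bits before the +1, so
	 * delta always lands in [1, 2^32] ticks; e.g. compare == count
	 * yields a full 2^32-tick period rather than an immediate expiry.
	 */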
	compare = kvm_read_c0_guest_compare(cop0);
	delta = (u64)(u32)(compare - count - 1) + 1;
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}

/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t now;

	/* Calculate bias */
	now = kvm_mips_count_time(vcpu);
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	if (kvm_mips_count_disabled(vcpu))
		/* The timer's disabled, adjust the static count */
		kvm_write_c0_guest_count(cop0, count);
	else
		/* Update timeout */
		kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 *
 * Initialise the timer to a sensible frequency, namely 100 MHz, zero it, and
 * set it going if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
{
	/* 100 MHz */
	vcpu->arch.count_hz = 100*1000*1000;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
					  vcpu->arch.count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Starting at 0 */
	kvm_mips_write_count(vcpu, 0);
}

/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:	-EINVAL if @count_hz is out of range.
 *		0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	ktime_t now;
	u32 count;

	/* ensure the frequency is in a sensible range... */
	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
		return -EINVAL;
	/* ... and has actually changed */
	if (vcpu->arch.count_hz == count_hz)
		return 0;

	/* Safely freeze timer so we can keep it continuous */
	dc = kvm_mips_count_disabled(vcpu);
	if (dc) {
		now = kvm_mips_count_time(vcpu);
		count = kvm_read_c0_guest_count(cop0);
	} else {
		now = kvm_mips_freeze_hrtimer(vcpu, &count);
	}

	/* Update the frequency */
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Calculate adjusted bias so dynamic count is unchanged */
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	/* Update and resume hrtimer */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
	return 0;
}

/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 * @ack:	Whether to acknowledge timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout.
 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
 * any pending timer interrupt is preserved.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	u32 old_compare = kvm_read_c0_guest_compare(cop0);
	ktime_t now;
	u32 count;

	/* if unchanged, must just be an ack */
	if (old_compare == compare) {
		if (!ack)
			return;
		kvm_mips_callbacks->dequeue_timer_int(vcpu);
		kvm_write_c0_guest_compare(cop0, compare);
		return;
	}

	/* freeze_hrtimer() takes care of timer interrupts <= count */
	dc = kvm_mips_count_disabled(vcpu);
	if (!dc)
		now = kvm_mips_freeze_hrtimer(vcpu, &count);

	if (ack)
		kvm_mips_callbacks->dequeue_timer_int(vcpu);

	kvm_write_c0_guest_compare(cop0, compare);

	/* resume_hrtimer() takes care of timer interrupts > count */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:	The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 count;
	ktime_t now;

	/* Stop hrtimer */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	/* Set the static count from the dynamic count, handling pending TI */
	now = ktime_get();
	count = kvm_mips_read_count_running(vcpu, now);
	kvm_write_c0_guest_count(cop0, count);

	return now;
}

/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		kvm_mips_count_disable(vcpu);
}

/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 count;

	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

	/*
	 * Set the dynamic count to match the static count.
	 * This starts the hrtimer if count_ctl.DC allows it.
	 * Otherwise it conveniently updates the biases.
	 */
	count = kvm_read_c0_guest_count(cop0);
	kvm_mips_write_count(vcpu, count);
}

/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:	Virtual CPU.
 * @count_ctl:	Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:	-EINVAL if reserved bits are set.
 *		0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
	s64 delta;
	ktime_t expire, now;
	u32 count, compare;

	/* Only allow defined bits to be changed */
	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
		return -EINVAL;

	/* Apply new value */
	vcpu->arch.count_ctl = count_ctl;

	/* Master CP0_Count disable */
	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
		/* Is CP0_Cause.DC already disabling CP0_Count? */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
				/* Just record the current time */
				vcpu->arch.count_resume = ktime_get();
		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
			/* disable timer and record current time */
			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
		} else {
			/*
			 * Calculate timeout relative to static count at resume
			 * time (wrap 0 to 2^32).
			 */
			count = kvm_read_c0_guest_count(cop0);
			compare = kvm_read_c0_guest_compare(cop0);
			delta = (u64)(u32)(compare - count - 1) + 1;
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
			expire = ktime_add_ns(vcpu->arch.count_resume, delta);

			/* Handle pending interrupt */
			now = ktime_get();
			if (ktime_compare(now, expire) >= 0)
				/* Nothing should be waiting on the timeout */
				kvm_mips_callbacks->queue_timer_int(vcpu);

			/* Resume hrtimer without changing bias */
			count = kvm_mips_read_count_running(vcpu, now);
			kvm_mips_resume_hrtimer(vcpu, now, count);
		}
	}

	return 0;
}

/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:		Virtual CPU.
 * @count_resume:	Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:	-EINVAL if out of valid range (0..now).
 *		0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
	/*
	 * It doesn't make sense for the resume time to be in the future, as it
	 * would be possible for the next interrupt to be more than a full
	 * period in the future.
	 */
	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
		return -EINVAL;

	vcpu->arch.count_resume = ns_to_ktime(count_resume);
	return 0;
}

/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 *
 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
	/* Add the Count period to the current expiry time */
	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
			       vcpu->arch.count_period);
	return HRTIMER_RESTART;
}

enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else {
		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
			vcpu->arch.pc);
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
	if (!vcpu->arch.pending_exceptions) {
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable, then definitely go off to user space to
		 * check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return EMULATE_DONE;
}

/*
 * XXXKYMA: Linux doesn't seem to use TLBR; return EMULATE_FAIL for now so
 * that we can catch this if things ever change.
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long pc = vcpu->arch.pc;

	kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
	return EMULATE_FAIL;
}

/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	struct kvm_mips_tlb *tlb = NULL;
	unsigned long pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		kvm_debug("%s: illegal index: %d\n", __func__, index);
		kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
			  pc, index, kvm_read_c0_guest_entryhi(cop0),
			  kvm_read_c0_guest_entrylo0(cop0),
			  kvm_read_c0_guest_entrylo1(cop0),
			  kvm_read_c0_guest_pagemask(cop0));
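		/* Mask off the P (probe failure) bit and wrap into the guest TLB range */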
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
	}

	tlb = &vcpu->arch.guest_tlb[index];
	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0),
		  kvm_read_c0_guest_pagemask(cop0));

	return EMULATE_DONE;
}

/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb *tlb = NULL;
	unsigned long pc = vcpu->arch.pc;
	int index;

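	/* Masking below works because KVM_MIPS_GUEST_TLB_SIZE is a power of two */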
	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);

	tlb = &vcpu->arch.guest_tlb[index];

	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0));

	return EMULATE_DONE;
}

enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	long entryhi = kvm_read_c0_guest_entryhi(cop0);
	unsigned long pc = vcpu->arch.pc;
	int index = -1;

	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

	kvm_write_c0_guest_index(cop0, index);

	kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
		  index);

	return EMULATE_DONE;
}

/**
 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config1 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

/**
 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config3 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config4 and ULRI are optional */
	unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}

/**
 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config4 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config5 is optional */
	unsigned int mask = MIPS_CONF_M;

	/* KScrExist */
	mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16;

	return mask;
}

/**
 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config5 CP0
 * register, by the guest itself.
 */
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE;
		/* We don't support UFR or UFE */
	}

	return mask;
}

enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
					   u32 *opc, u32 cause,
					   struct kvm_run *run,
					   struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 rt, rd, sel;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	if (inst.co_format.co) {
		switch (inst.co_format.func) {
		case tlbr_op:	/* Read indexed TLB entry */
			er = kvm_mips_emul_tlbr(vcpu);
			break;
		case tlbwi_op:	/* Write indexed */
			er = kvm_mips_emul_tlbwi(vcpu);
			break;
		case tlbwr_op:	/* Write random */
			er = kvm_mips_emul_tlbwr(vcpu);
			break;
		case tlbp_op:	/* TLB Probe */
			er = kvm_mips_emul_tlbp(vcpu);
			break;
		case rfe_op:
			kvm_err("!!!COP0_RFE!!!\n");
			break;
		case eret_op:
			er = kvm_mips_emul_eret(vcpu);
			goto dont_update_pc;
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		}
	} else {
		rt = inst.c0r_format.rt;
		rd = inst.c0r_format.rd;
		sel = inst.c0r_format.sel;

		switch (inst.c0r_format.rs) {
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			/* Get reg */
			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				vcpu->arch.gprs[rt] =
					(s32)kvm_mips_read_count(vcpu);
			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
				vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			} else {
				vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			}

			trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
				      KVM_TRACE_COP0(rd, sel),
				      vcpu->arch.gprs[rt]);
			break;

		case dmfc_op:
			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

			trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
				      KVM_TRACE_COP0(rd, sel),
				      vcpu->arch.gprs[rt]);
			break;

		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
				      KVM_TRACE_COP0(rd, sel),
				      vcpu->arch.gprs[rt]);

			if ((rd == MIPS_CP0_TLB_INDEX)
			    && (vcpu->arch.gprs[rt] >=
				KVM_MIPS_GUEST_TLB_SIZE)) {
				kvm_err("Invalid TLB Index: %ld",
					vcpu->arch.gprs[rt]);
				er = EMULATE_FAIL;
				break;
			}
#define C0_EBASE_CORE_MASK 0xff
			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
				/* Preserve CORE number */
				kvm_change_c0_guest_ebase(cop0,
							  ~(C0_EBASE_CORE_MASK),
							  vcpu->arch.gprs[rt]);
				kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
					kvm_read_c0_guest_ebase(cop0));
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				u32 nasid =
					vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
				    ((kvm_read_c0_guest_entryhi(cop0) &
				      KVM_ENTRYHI_ASID) != nasid)) {
					trace_kvm_asid_change(vcpu,
						kvm_read_c0_guest_entryhi(cop0)
							& KVM_ENTRYHI_ASID,
						nasid);

					/* Blow away the shadow host TLBs */
					kvm_mips_flush_host_tlb(1);
				}
				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
			}
			/* Are we writing to COUNT? */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
				goto done;
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
						       true);
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				unsigned int old_val, val, change;

				old_val = kvm_read_c0_guest_status(cop0);
				val = vcpu->arch.gprs[rt];
				change = val ^ old_val;

				/* Make sure that the NMI bit is never set */
				val &= ~ST0_NMI;

				/*
				 * Don't allow CU1 or FR to be set unless FPU
				 * capability enabled and exists in guest
				 * configuration.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					val &= ~(ST0_CU1 | ST0_FR);

				/*
				 * Also don't allow FR to be set if host doesn't
				 * support it.
				 */
				if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
					val &= ~ST0_FR;


				/* Handle changes in FPU mode */
				preempt_disable();

				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				if (change & ST0_FR)
					kvm_drop_fpu(vcpu);

				/*
				 * If MSA state is already live, it is undefined
				 * how it interacts with FR=0 FPU state, and we
				 * don't want to hit reserved instruction
				 * exceptions trying to save the MSA state later
				 * when CU=1 && FR=1, so play it safe and save
				 * it first.
				 */
				if (change & ST0_CU1 && !(val & ST0_FR) &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
					kvm_lose_fpu(vcpu);

				/*
				 * Propagate CU1 (FPU enable) changes
				 * immediately if the FPU context is already
				 * loaded. When disabling we leave the context
				 * loaded so it can be quickly enabled again in
				 * the near future.
				 */
				if (change & ST0_CU1 &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
					change_c0_status(ST0_CU1, val);

				preempt_enable();

				kvm_write_c0_guest_status(cop0, val);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				/*
				 * If FPU present, we need CU1/FR bits to take
				 * effect fairly soon.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
				unsigned int old_val, val, change, wrmask;

				old_val = kvm_read_c0_guest_config5(cop0);
				val = vcpu->arch.gprs[rt];

				/* Only a few bits are writable in Config5 */
				wrmask = kvm_mips_config5_wrmask(vcpu);
				change = (val ^ old_val) & wrmask;
				val = old_val ^ change;


				/* Handle changes in FPU/MSA modes */
				preempt_disable();

				/*
				 * Propagate FRE changes immediately if the FPU
				 * context is already loaded.
				 */
				if (change & MIPS_CONF5_FRE &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
					change_c0_config5(MIPS_CONF5_FRE, val);

				/*
				 * Propagate MSAEn changes immediately if the
				 * MSA context is already loaded. When disabling
				 * we leave the context loaded so it can be
				 * quickly enabled again in the near future.
				 */
				if (change & MIPS_CONF5_MSAEN &&
				    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
					change_c0_config5(MIPS_CONF5_MSAEN,
							  val);

				preempt_enable();

				kvm_write_c0_guest_config5(cop0, val);
			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
				u32 old_cause, new_cause;

				old_cause = kvm_read_c0_guest_cause(cop0);
				new_cause = vcpu->arch.gprs[rt];
				/* Update R/W bits */
				kvm_change_c0_guest_cause(cop0, 0x08800300,
							  new_cause);
				/* DC bit enabling/disabling timer? */
				if ((old_cause ^ new_cause) & CAUSEF_DC) {
					if (new_cause & CAUSEF_DC)
						kvm_mips_count_disable_cause(vcpu);
					else
						kvm_mips_count_enable_cause(vcpu);
				}
			} else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
				u32 mask = MIPS_HWRENA_CPUNUM |
					   MIPS_HWRENA_SYNCISTEP |
					   MIPS_HWRENA_CC |
					   MIPS_HWRENA_CCRES;

				if (kvm_read_c0_guest_config3(cop0) &
				    MIPS_CONF3_ULRI)
					mask |= MIPS_HWRENA_ULR;
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
			} else {
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			}
			break;

		case dmtc_op:
			kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
				vcpu->arch.pc, rt, rd, sel);
			trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
				      KVM_TRACE_COP0(rd, sel),
				      vcpu->arch.gprs[rt]);
			er = EMULATE_FAIL;
			break;

		case mfmc0_op:
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
			if (rt != 0)
				vcpu->arch.gprs[rt] =
					kvm_read_c0_guest_status(cop0);
			/* EI/DI: the sc bit set means EI, clear means DI */
			if (inst.mfmc0_format.sc) {
				kvm_debug("[%#lx] mfmc0_op: EI\n",
					  vcpu->arch.pc);
				kvm_set_c0_guest_status(cop0, ST0_IE);
			} else {
				kvm_debug("[%#lx] mfmc0_op: DI\n",
					  vcpu->arch.pc);
				kvm_clear_c0_guest_status(cop0, ST0_IE);
			}

			break;

		case wrpgpr_op:
			{
				u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
				u32 pss =
					(cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
				/*
				 * We don't support any shadow register sets, so
				 * SRSCtl[PSS] == SRSCtl[CSS] = 0
				 */
				if (css || pss) {
					er = EMULATE_FAIL;
					break;
				}
				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
					  vcpu->arch.gprs[rt]);
				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
			}
			break;
		default:
			kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
				vcpu->arch.pc, inst.c0r_format.rs);
			er = EMULATE_FAIL;
			break;
		}
	}

done:
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

dont_update_pc:
	/*
	 * This is for special instructions whose emulation
	 * updates the PC, so do not overwrite the PC under
	 * any circumstances
	 */

	return er;
}

enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	u32 rt;
	u32 bytes;
	void *data = run->mmio.data;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = inst.i_format.rt;

	switch (inst.i_format.opcode) {
	case sb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u8 *) data = vcpu->arch.gprs[rt];
		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
			  *(u8 *) data);

		break;

	case sw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u32 *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *) data);
		break;

	case sh_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u16 *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(u32 *) data);
		break;

	default:
		kvm_err("Store not yet supported (inst=0x%08x)\n",
			inst.word);
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}

enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause, struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	u32 op, rt;
	u32 bytes;

	rt = inst.i_format.rt;
	op = inst.i_format.opcode;

	vcpu->arch.pending_load_cause = cause;
	vcpu->arch.io_gpr = rt;
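	/*
	 * The loaded value is written back to io_gpr once userspace completes
	 * the MMIO read (see kvm_mips_complete_mmio_load()).
	 */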

	switch (op) {
	case lw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		break;

	case lh_op:
	case lhu_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;

		if (op == lh_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	case lbu_op:
	case lb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
			kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						       host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lb_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	default:
		kvm_err("Load not yet supported (inst=0x%08x)\n",
			inst.word);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
					     u32 *opc, u32 cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 cache, op_inst, op, base;
	s16 offset;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = inst.i_format.rs;
	op_inst = inst.i_format.rt;
	if (cpu_has_mips_r6)
		offset = inst.spec3_format.simmediate;
	else
		offset = inst.i_format.simmediate;
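	/* The rt field encodes the op: bits [1:0] select the cache, bits [4:2] the operation */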
	cache = op_inst & CacheOp_Cache;
	op = op_inst & CacheOp_Op;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/*
	 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
	 * invalidate the caches entirely by stepping through all the
	 * ways/indexes
	 */
	if (op == Index_Writeback_Inv) {
		kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			  vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
			  arch->gprs[base], offset);

		if (cache == Cache_D)
			r4k_blast_dcache();
		else if (cache == Cache_I)
			r4k_blast_icache();
		else {
			kvm_err("%s: unsupported CACHE INDEX operation\n",
				__func__);
			return EMULATE_FAIL;
		}

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
		goto done;
	}

	preempt_disable();
	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		int index;

		/* If an entry already exists then skip */
		if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
			goto skip_fault;

		/*
		 * If address not in the guest TLB, then give the guest a fault,
		 * the resulting handler will do the right thing
		 */
		index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
						  (kvm_read_c0_guest_entryhi
						   (cop0) & KVM_ENTRYHI_ASID));

		if (index < 0) {
			vcpu->arch.host_cp0_badvaddr = va;
			vcpu->arch.pc = curr_pc;
			er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
							 vcpu);
			preempt_enable();
			goto dont_update_pc;
		} else {
			struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
			/*
			 * Check if the entry is valid, if not then setup a TLB
			 * invalid exception to the guest
			 */
			if (!TLB_IS_VALID(*tlb, va)) {
				vcpu->arch.host_cp0_badvaddr = va;
				vcpu->arch.pc = curr_pc;
				er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
								run, vcpu);
				preempt_enable();
				goto dont_update_pc;
			} else {
				/*
				 * We fault an entry from the guest tlb to the
				 * shadow host TLB
				 */
				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
			}
		}
	} else {
		kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
		preempt_enable();
		goto done;

	}

skip_fault:
	/* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
	if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
		flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/*
		 * Replace the CACHE instruction, with a SYNCI, not the same,
		 * but avoids a trap
		 */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else if (op_inst == Hit_Invalidate_I) {
		flush_dcache_line(va);
		flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
		/* Replace the CACHE instruction, with a SYNCI */
		kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
	} else {
		kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
			cache, op, base, arch->gprs[base], offset);
		er = EMULATE_FAIL;
	}

	preempt_enable();
done:
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

dont_update_pc:
	/*
	 * This is for exceptions whose emulation updates the PC, so do not
	 * overwrite the PC under any circumstances
	 */

	return er;
}

enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
{
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;

	/* Fetch the instruction. */
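	/* In a branch delay slot, EPC points at the branch; the faulting instruction is at EPC + 4 */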
	if (cause & CAUSEF_BD)
		opc += 1;

	inst.word = kvm_get_inst(opc, vcpu);

	switch (inst.r_format.opcode) {
	case cop0_op:
		er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
		break;
	case sb_op:
	case sh_op:
	case sw_op:
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		break;
	case lb_op:
	case lbu_op:
	case lhu_op:
	case lh_op:
	case lw_op:
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		break;

#ifndef CONFIG_CPU_MIPSR6
	case cache_op:
		++vcpu->stat.cache_exits;
		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
		er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
		break;
#else
	case spec3_op:
		switch (inst.spec3_format.func) {
		case cache6_op:
			++vcpu->stat.cache_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
			er = kvm_mips_emulate_cache(inst, opc, cause, run,
						    vcpu);
			break;
		default:
			goto unknown;
		};
		break;
	unknown:
#endif

	default:
		kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
			inst.word);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

enum emulation_result kvm_mips_emulate_syscall(u32 cause,
					       u32 *opc,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	enum emulation_result er = EMULATE_DONE;

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

		kvm_change_c0_guest_cause(cop0, (0xff),
					  (EXCCODE_SYS << CAUSEB_EXCCODE));

		/* Set PC to the exception entry point */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;

	} else {
		kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
						  u32 *opc,
						  struct kvm_run *run,
						  struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);

	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* save old pc */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);

		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

		kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
			  arch->pc);

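		/* A refill exception with EXL clear uses the special vector at offset 0x0 */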
1862 /* set pc to the exception entry point */
1863 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1864
1865 } else {
1866 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1867 arch->pc);
1868
1869 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1870 }
1871
1872 kvm_change_c0_guest_cause(cop0, (0xff),
1873 (EXCCODE_TLBL << CAUSEB_EXCCODE));
1874
1875 /* setup badvaddr, context and entryhi registers for the guest */
1876 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1877 /* XXXKYMA: is the context register used by linux??? */
1878 kvm_write_c0_guest_entryhi(cop0, entryhi);
1879 /* Blow away the shadow host TLBs */
1880 kvm_mips_flush_host_tlb(1);
1881
1882 return EMULATE_DONE;
1883 }
1884
1885 enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
1886 u32 *opc,
1887 struct kvm_run *run,
1888 struct kvm_vcpu *vcpu)
1889 {
1890 struct mips_coproc *cop0 = vcpu->arch.cop0;
1891 struct kvm_vcpu_arch *arch = &vcpu->arch;
1892 unsigned long entryhi =
1893 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1894 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
1895
1896 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1897 /* save old pc */
1898 kvm_write_c0_guest_epc(cop0, arch->pc);
1899 kvm_set_c0_guest_status(cop0, ST0_EXL);
1900
1901 if (cause & CAUSEF_BD)
1902 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1903 else
1904 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1905
1906 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1907 arch->pc);
1908
1909 /* set pc to the exception entry point */
1910 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1911
1912 } else {
1913 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1914 arch->pc);
1915 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1916 }
1917
1918 kvm_change_c0_guest_cause(cop0, (0xff),
1919 (EXCCODE_TLBL << CAUSEB_EXCCODE));
1920
1921 /* setup badvaddr, context and entryhi registers for the guest */
1922 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1923 /* XXXKYMA: is the context register used by linux??? */
1924 kvm_write_c0_guest_entryhi(cop0, entryhi);
1925 /* Blow away the shadow host TLBs */
1926 kvm_mips_flush_host_tlb(1);
1927
1928 return EMULATE_DONE;
1929 }
1930
1931 enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
1932 u32 *opc,
1933 struct kvm_run *run,
1934 struct kvm_vcpu *vcpu)
1935 {
1936 struct mips_coproc *cop0 = vcpu->arch.cop0;
1937 struct kvm_vcpu_arch *arch = &vcpu->arch;
1938 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1939 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
1940
1941 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1942 /* save old pc */
1943 kvm_write_c0_guest_epc(cop0, arch->pc);
1944 kvm_set_c0_guest_status(cop0, ST0_EXL);
1945
1946 if (cause & CAUSEF_BD)
1947 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1948 else
1949 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1950
1951 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1952 arch->pc);
1953
1954 /* Set PC to the exception entry point */
1955 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1956 } else {
1957 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1958 arch->pc);
1959 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1960 }
1961
1962 kvm_change_c0_guest_cause(cop0, (0xff),
1963 (EXCCODE_TLBS << CAUSEB_EXCCODE));
1964
1965 /* setup badvaddr, context and entryhi registers for the guest */
1966 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1967 /* XXXKYMA: is the context register used by linux??? */
1968 kvm_write_c0_guest_entryhi(cop0, entryhi);
1969 /* Blow away the shadow host TLBs */
1970 kvm_mips_flush_host_tlb(1);
1971
1972 return EMULATE_DONE;
1973 }
1974
1975 enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
1976 u32 *opc,
1977 struct kvm_run *run,
1978 struct kvm_vcpu *vcpu)
1979 {
1980 struct mips_coproc *cop0 = vcpu->arch.cop0;
1981 struct kvm_vcpu_arch *arch = &vcpu->arch;
1982 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1983 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
1984
1985 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1986 /* save old pc */
1987 kvm_write_c0_guest_epc(cop0, arch->pc);
1988 kvm_set_c0_guest_status(cop0, ST0_EXL);
1989
1990 if (cause & CAUSEF_BD)
1991 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1992 else
1993 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1994
1995 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1996 arch->pc);
1997
1998 /* Set PC to the exception entry point */
1999 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2000 } else {
2001 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
2002 arch->pc);
2003 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2004 }
2005
2006 kvm_change_c0_guest_cause(cop0, (0xff),
2007 (EXCCODE_TLBS << CAUSEB_EXCCODE));
2008
2009 /* setup badvaddr, context and entryhi registers for the guest */
2010 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2011 /* XXXKYMA: is the context register used by linux??? */
2012 kvm_write_c0_guest_entryhi(cop0, entryhi);
2013 /* Blow away the shadow host TLBs */
2014 kvm_mips_flush_host_tlb(1);
2015
2016 return EMULATE_DONE;
2017 }
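/*
 * Together with the two functions above, this completes the four TLB
 * delivery cases: miss vs. invalid, crossed with load (EXCCODE_TLBL)
 * vs. store (EXCCODE_TLBS). Only a miss taken with EXL clear uses the
 * refill vector at offset 0x0; an invalid entry means the guest has a
 * matching TLB entry with V=0, so it is always reported through the
 * general vector at 0x180.
 */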
2018
2019 /* TLBMOD: store into address matching TLB with Dirty bit off */
2020 enum emulation_result kvm_mips_handle_tlbmod(u32 cause, u32 *opc,
2021 struct kvm_run *run,
2022 struct kvm_vcpu *vcpu)
2023 {
2024 enum emulation_result er = EMULATE_DONE;
2025 #ifdef DEBUG
2026 struct mips_coproc *cop0 = vcpu->arch.cop0;
2027 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2028 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2029 int index;
2030
2031 /* If address not in the guest TLB, then we are in trouble */
2032 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
2033 if (index < 0) {
2034 /* XXXKYMA Invalidate and retry */
2035 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
2036 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
2037 __func__, entryhi);
2038 kvm_mips_dump_guest_tlbs(vcpu);
2039 kvm_mips_dump_host_tlbs();
2040 return EMULATE_FAIL;
2041 }
2042 #endif
2043
2044 er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
2045 return er;
2046 }
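/*
 * The DEBUG-only lookup above is a consistency check: a TLBMOD from
 * the host should only occur for an address the guest TLB actually
 * maps, since the shadow entry was derived from a guest entry. If the
 * lookup fails, both TLBs are dumped and emulation is aborted rather
 * than delivering a bogus exception to the guest.
 */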
2047
2048 enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
2049 u32 *opc,
2050 struct kvm_run *run,
2051 struct kvm_vcpu *vcpu)
2052 {
2053 struct mips_coproc *cop0 = vcpu->arch.cop0;
2054 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2055 (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2056 struct kvm_vcpu_arch *arch = &vcpu->arch;
2057
2058 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2059 /* save old pc */
2060 kvm_write_c0_guest_epc(cop0, arch->pc);
2061 kvm_set_c0_guest_status(cop0, ST0_EXL);
2062
2063 if (cause & CAUSEF_BD)
2064 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2065 else
2066 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2067
2068 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
2069 arch->pc);
2070
2071 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2072 } else {
2073 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
2074 arch->pc);
2075 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2076 }
2077
2078 kvm_change_c0_guest_cause(cop0, (0xff),
2079 (EXCCODE_MOD << CAUSEB_EXCCODE));
2080
2081 /* setup badvaddr, context and entryhi registers for the guest */
2082 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2083 /* XXXKYMA: is the context register used by linux??? */
2084 kvm_write_c0_guest_entryhi(cop0, entryhi);
2085 /* Blow away the shadow host TLBs */
2086 kvm_mips_flush_host_tlb(1);
2087
2088 return EMULATE_DONE;
2089 }
2090
2091 enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
2092 u32 *opc,
2093 struct kvm_run *run,
2094 struct kvm_vcpu *vcpu)
2095 {
2096 struct mips_coproc *cop0 = vcpu->arch.cop0;
2097 struct kvm_vcpu_arch *arch = &vcpu->arch;
2098
2099 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2100 /* save old pc */
2101 kvm_write_c0_guest_epc(cop0, arch->pc);
2102 kvm_set_c0_guest_status(cop0, ST0_EXL);
2103
2104 if (cause & CAUSEF_BD)
2105 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2106 else
2107 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2108
2109 }
2110
2111 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2112
2113 kvm_change_c0_guest_cause(cop0, (0xff),
2114 (EXCCODE_CPU << CAUSEB_EXCCODE));
2115 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
2116
2117 return EMULATE_DONE;
2118 }
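/*
 * Note that the "FPU exception" here is delivered as a Coprocessor
 * Unusable exception (EXCCODE_CPU) with Cause.CE = 1, i.e. coprocessor
 * 1, the FPU. The guest kernel is then expected to enable the FPU for
 * the faulting task or emulate the instruction itself.
 */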
2119
2120 enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
2121 u32 *opc,
2122 struct kvm_run *run,
2123 struct kvm_vcpu *vcpu)
2124 {
2125 struct mips_coproc *cop0 = vcpu->arch.cop0;
2126 struct kvm_vcpu_arch *arch = &vcpu->arch;
2127 enum emulation_result er = EMULATE_DONE;
2128
2129 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2130 /* save old pc */
2131 kvm_write_c0_guest_epc(cop0, arch->pc);
2132 kvm_set_c0_guest_status(cop0, ST0_EXL);
2133
2134 if (cause & CAUSEF_BD)
2135 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2136 else
2137 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2138
2139 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
2140
2141 kvm_change_c0_guest_cause(cop0, (0xff),
2142 (EXCCODE_RI << CAUSEB_EXCCODE));
2143
2144 /* Set PC to the exception entry point */
2145 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2146
2147 } else {
2148 kvm_err("Trying to deliver RI when EXL is already set\n");
2149 er = EMULATE_FAIL;
2150 }
2151
2152 return er;
2153 }
2154
2155 enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
2156 u32 *opc,
2157 struct kvm_run *run,
2158 struct kvm_vcpu *vcpu)
2159 {
2160 struct mips_coproc *cop0 = vcpu->arch.cop0;
2161 struct kvm_vcpu_arch *arch = &vcpu->arch;
2162 enum emulation_result er = EMULATE_DONE;
2163
2164 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2165 /* save old pc */
2166 kvm_write_c0_guest_epc(cop0, arch->pc);
2167 kvm_set_c0_guest_status(cop0, ST0_EXL);
2168
2169 if (cause & CAUSEF_BD)
2170 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2171 else
2172 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2173
2174 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
2175
2176 kvm_change_c0_guest_cause(cop0, (0xff),
2177 (EXCCODE_BP << CAUSEB_EXCCODE));
2178
2179 /* Set PC to the exception entry point */
2180 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2181
2182 } else {
2183 kvm_err("Trying to deliver BP when EXL is already set\n");
2184 er = EMULATE_FAIL;
2185 }
2186
2187 return er;
2188 }
2189
2190 enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
2191 u32 *opc,
2192 struct kvm_run *run,
2193 struct kvm_vcpu *vcpu)
2194 {
2195 struct mips_coproc *cop0 = vcpu->arch.cop0;
2196 struct kvm_vcpu_arch *arch = &vcpu->arch;
2197 enum emulation_result er = EMULATE_DONE;
2198
2199 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2200 /* save old pc */
2201 kvm_write_c0_guest_epc(cop0, arch->pc);
2202 kvm_set_c0_guest_status(cop0, ST0_EXL);
2203
2204 if (cause & CAUSEF_BD)
2205 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2206 else
2207 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2208
2209 kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
2210
2211 kvm_change_c0_guest_cause(cop0, (0xff),
2212 (EXCCODE_TR << CAUSEB_EXCCODE));
2213
2214 /* Set PC to the exception entry point */
2215 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2216
2217 } else {
2218 kvm_err("Trying to deliver TRAP when EXL is already set\n");
2219 er = EMULATE_FAIL;
2220 }
2221
2222 return er;
2223 }
2224
2225 enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
2226 u32 *opc,
2227 struct kvm_run *run,
2228 struct kvm_vcpu *vcpu)
2229 {
2230 struct mips_coproc *cop0 = vcpu->arch.cop0;
2231 struct kvm_vcpu_arch *arch = &vcpu->arch;
2232 enum emulation_result er = EMULATE_DONE;
2233
2234 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2235 /* save old pc */
2236 kvm_write_c0_guest_epc(cop0, arch->pc);
2237 kvm_set_c0_guest_status(cop0, ST0_EXL);
2238
2239 if (cause & CAUSEF_BD)
2240 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2241 else
2242 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2243
2244 kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
2245
2246 kvm_change_c0_guest_cause(cop0, (0xff),
2247 (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
2248
2249 /* Set PC to the exception entry point */
2250 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2251
2252 } else {
2253 kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
2254 er = EMULATE_FAIL;
2255 }
2256
2257 return er;
2258 }
2259
2260 enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
2261 u32 *opc,
2262 struct kvm_run *run,
2263 struct kvm_vcpu *vcpu)
2264 {
2265 struct mips_coproc *cop0 = vcpu->arch.cop0;
2266 struct kvm_vcpu_arch *arch = &vcpu->arch;
2267 enum emulation_result er = EMULATE_DONE;
2268
2269 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2270 /* save old pc */
2271 kvm_write_c0_guest_epc(cop0, arch->pc);
2272 kvm_set_c0_guest_status(cop0, ST0_EXL);
2273
2274 if (cause & CAUSEF_BD)
2275 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2276 else
2277 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2278
2279 kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
2280
2281 kvm_change_c0_guest_cause(cop0, (0xff),
2282 (EXCCODE_FPE << CAUSEB_EXCCODE));
2283
2284 /* Set PC to the exception entry point */
2285 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2286
2287 } else {
2288 kvm_err("Trying to deliver FPE when EXL is already set\n");
2289 er = EMULATE_FAIL;
2290 }
2291
2292 return er;
2293 }
2294
2295 enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
2296 u32 *opc,
2297 struct kvm_run *run,
2298 struct kvm_vcpu *vcpu)
2299 {
2300 struct mips_coproc *cop0 = vcpu->arch.cop0;
2301 struct kvm_vcpu_arch *arch = &vcpu->arch;
2302 enum emulation_result er = EMULATE_DONE;
2303
2304 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2305 /* save old pc */
2306 kvm_write_c0_guest_epc(cop0, arch->pc);
2307 kvm_set_c0_guest_status(cop0, ST0_EXL);
2308
2309 if (cause & CAUSEF_BD)
2310 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2311 else
2312 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2313
2314 kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
2315
2316 kvm_change_c0_guest_cause(cop0, (0xff),
2317 (EXCCODE_MSADIS << CAUSEB_EXCCODE));
2318
2319 /* Set PC to the exception entry point */
2320 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2321
2322 } else {
2323 kvm_err("Trying to deliver MSADIS when EXL is already set\n");
2324 er = EMULATE_FAIL;
2325 }
2326
2327 return er;
2328 }
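/*
 * kvm_mips_emulate_ri_exc() through kvm_mips_emulate_msadis_exc()
 * above all follow the same delivery pattern: save EPC, set
 * Status.EXL, record the branch-delay state in Cause.BD, write the
 * ExcCode, and vector to KSEG0 + 0x180. Unlike the TLB helpers, they
 * refuse to deliver when EXL is already set and return EMULATE_FAIL
 * instead of silently reusing the general vector.
 */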
2329
2330 enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
2331 struct kvm_run *run,
2332 struct kvm_vcpu *vcpu)
2333 {
2334 struct mips_coproc *cop0 = vcpu->arch.cop0;
2335 struct kvm_vcpu_arch *arch = &vcpu->arch;
2336 enum emulation_result er = EMULATE_DONE;
2337 unsigned long curr_pc;
2338 union mips_instruction inst;
2339
2340 /*
2341 * Update PC and hold onto current PC in case there is
2342 * an error and we want to rollback the PC
2343 */
2344 curr_pc = vcpu->arch.pc;
2345 er = update_pc(vcpu, cause);
2346 if (er == EMULATE_FAIL)
2347 return er;
2348
2349 /* Fetch the instruction. */
2350 if (cause & CAUSEF_BD)
2351 opc += 1;
2352
2353 inst.word = kvm_get_inst(opc, vcpu);
2354
2355 if (inst.word == KVM_INVALID_INST) {
2356 kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
2357 return EMULATE_FAIL;
2358 }
2359
2360 if (inst.r_format.opcode == spec3_op &&
2361 inst.r_format.func == rdhwr_op &&
2362 inst.r_format.rs == 0 &&
2363 (inst.r_format.re >> 3) == 0) {
2364 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2365 int rd = inst.r_format.rd;
2366 int rt = inst.r_format.rt;
2367 int sel = inst.r_format.re & 0x7;
2368
2369 /* If usermode, check RDHWR rd is allowed by guest HWREna */
2370 if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
2371 kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
2372 rd, opc);
2373 goto emulate_ri;
2374 }
2375 switch (rd) {
2376 case MIPS_HWR_CPUNUM: /* CPU number */
2377 arch->gprs[rt] = vcpu->vcpu_id;
2378 break;
2379 case MIPS_HWR_SYNCISTEP: /* SYNCI length */
2380 arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
2381 current_cpu_data.icache.linesz);
2382 break;
2383 case MIPS_HWR_CC: /* Read count register */
2384 arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
2385 break;
2386 case MIPS_HWR_CCRES: /* Count register resolution */
2387 switch (current_cpu_data.cputype) {
2388 case CPU_20KC:
2389 case CPU_25KF:
2390 arch->gprs[rt] = 1;
2391 break;
2392 default:
2393 arch->gprs[rt] = 2;
2394 }
2395 break;
2396 case MIPS_HWR_ULR: /* Read UserLocal register */
2397 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
2398 break;
2399
2400 default:
2401 kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
2402 goto emulate_ri;
2403 }
2404
2405 trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
2406 vcpu->arch.gprs[rt]);
2407 } else {
2408 kvm_debug("Emulate RI not supported @ %p: %#x\n",
2409 opc, inst.word);
2410 goto emulate_ri;
2411 }
2412
2413 return EMULATE_DONE;
2414
2415 emulate_ri:
2416 /*
2417 * Rollback PC (if in branch delay slot then the PC already points to
2418 * branch target), and pass the RI exception to the guest OS.
2419 */
2420 vcpu->arch.pc = curr_pc;
2421 return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
2422 }
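/*
 * A typical sequence that lands here is a guest user program reading
 * the UserLocal register for TLS (illustrative guest code, not part of
 * this file):
 *
 *	rdhwr	$3, $29		# v1 = hardware register 29 (UserLocal)
 *
 * On cores where RDHWR traps as a Reserved Instruction, the
 * MIPS_HWR_ULR case above supplies the value from the guest's CP0
 * UserLocal register instead.
 */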
2423
2424 enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2425 struct kvm_run *run)
2426 {
2427 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
2428 enum emulation_result er = EMULATE_DONE;
2429
2430 if (run->mmio.len > sizeof(*gpr)) {
2431 kvm_err("Bad MMIO length: %d\n", run->mmio.len);
2432 er = EMULATE_FAIL;
2433 goto done;
2434 }
2435
2436 er = update_pc(vcpu, vcpu->arch.pending_load_cause);
2437 if (er == EMULATE_FAIL)
2438 return er;
2439
2440 switch (run->mmio.len) {
2441 case 4:
2442 *gpr = *(s32 *)run->mmio.data;
2443 break;
2444
2445 case 2:
2446 if (vcpu->mmio_needed == 2)
2447 *gpr = *(s16 *)run->mmio.data;
2448 else
2449 *gpr = *(u16 *)run->mmio.data;
2450
2451 break;
2452 case 1:
2453 if (vcpu->mmio_needed == 2)
2454 *gpr = *(s8 *)run->mmio.data;
2455 else
2456 *gpr = *(u8 *)run->mmio.data;
2457 break;
2458 }
2459
2460 if (vcpu->arch.pending_load_cause & CAUSEF_BD)
2461 kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
2462 vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
2463 vcpu->mmio_needed);
2464
2465 done:
2466 return er;
2467 }
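/*
 * The mmio_needed value recorded when the load was started selects the
 * extension: 2 means the original instruction was a signed load (lb,
 * lh), so the data is sign-extended through the (s8 *)/(s16 *) casts
 * above; anything else zero-extends. 32-bit loads always go through
 * the (s32 *) cast, matching MIPS lw semantics on 64-bit GPRs.
 */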
2468
2469 static enum emulation_result kvm_mips_emulate_exc(u32 cause,
2470 u32 *opc,
2471 struct kvm_run *run,
2472 struct kvm_vcpu *vcpu)
2473 {
2474 u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2475 struct mips_coproc *cop0 = vcpu->arch.cop0;
2476 struct kvm_vcpu_arch *arch = &vcpu->arch;
2477 enum emulation_result er = EMULATE_DONE;
2478
2479 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2480 /* save old pc */
2481 kvm_write_c0_guest_epc(cop0, arch->pc);
2482 kvm_set_c0_guest_status(cop0, ST0_EXL);
2483
2484 if (cause & CAUSEF_BD)
2485 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2486 else
2487 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2488
2489 kvm_change_c0_guest_cause(cop0, (0xff),
2490 (exccode << CAUSEB_EXCCODE));
2491
2492 /* Set PC to the exception entry point */
2493 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2494 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2495
2496 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
2497 exccode, kvm_read_c0_guest_epc(cop0),
2498 kvm_read_c0_guest_badvaddr(cop0));
2499 } else {
2500 kvm_err("Trying to deliver EXC when EXL is already set\n");
2501 er = EMULATE_FAIL;
2502 }
2503
2504 return er;
2505 }
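/*
 * This is the generic fall-back used by kvm_mips_check_privilege()
 * below: whatever ExcCode is left in 'cause' (possibly rewritten to an
 * address error first) is delivered to the guest's general exception
 * vector, with BadVAddr forwarded from the host fault.
 */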
2506
2507 enum emulation_result kvm_mips_check_privilege(u32 cause,
2508 u32 *opc,
2509 struct kvm_run *run,
2510 struct kvm_vcpu *vcpu)
2511 {
2512 enum emulation_result er = EMULATE_DONE;
2513 u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2514 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
2515
2516 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2517
2518 if (usermode) {
2519 switch (exccode) {
2520 case EXCCODE_INT:
2521 case EXCCODE_SYS:
2522 case EXCCODE_BP:
2523 case EXCCODE_RI:
2524 case EXCCODE_TR:
2525 case EXCCODE_MSAFPE:
2526 case EXCCODE_FPE:
2527 case EXCCODE_MSADIS:
2528 break;
2529
2530 case EXCCODE_CPU:
2531 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
2532 er = EMULATE_PRIV_FAIL;
2533 break;
2534
2535 case EXCCODE_MOD:
2536 break;
2537
2538 case EXCCODE_TLBL:
2539 /*
2540 * If we are accessing guest kernel space, send an
2541 * address error exception to the guest.
2542 */
2543 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2544 kvm_debug("%s: LD MISS @ %#lx\n", __func__,
2545 badvaddr);
2546 cause &= ~0xff;
2547 cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
2548 er = EMULATE_PRIV_FAIL;
2549 }
2550 break;
2551
2552 case EXCCODE_TLBS:
2553 /*
2554 * If we are accessing guest kernel space, send an
2555 * address error exception to the guest.
2556 */
2557 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2558 kvm_debug("%s: ST MISS @ %#lx\n", __func__,
2559 badvaddr);
2560 cause &= ~0xff;
2561 cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
2562 er = EMULATE_PRIV_FAIL;
2563 }
2564 break;
2565
2566 case EXCCODE_ADES:
2567 kvm_debug("%s: address error ST @ %#lx\n", __func__,
2568 badvaddr);
2569 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2570 cause &= ~0xff;
2571 cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
2572 }
2573 er = EMULATE_PRIV_FAIL;
2574 break;
2575 case EXCCODE_ADEL:
2576 kvm_debug("%s: address error LD @ %#lx\n", __func__,
2577 badvaddr);
2578 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2579 cause &= ~0xff;
2580 cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
2581 }
2582 er = EMULATE_PRIV_FAIL;
2583 break;
2584 default:
2585 er = EMULATE_PRIV_FAIL;
2586 break;
2587 }
2588 }
2589
2590 if (er == EMULATE_PRIV_FAIL)
2591 kvm_mips_emulate_exc(cause, opc, run, vcpu);
2592
2593 return er;
2594 }
2595
2596 /*
2597 * User Address (UA) fault; this could happen if
2598 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
2599 * case we pass on the fault to the guest kernel and let it handle it.
2600 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
2601 * case we inject the TLB from the Guest TLB into the shadow host TLB
2602 */
2603 enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
2604 u32 *opc,
2605 struct kvm_run *run,
2606 struct kvm_vcpu *vcpu)
2607 {
2608 enum emulation_result er = EMULATE_DONE;
2609 u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2610 unsigned long va = vcpu->arch.host_cp0_badvaddr;
2611 int index;
2612
2613 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
2614 vcpu->arch.host_cp0_badvaddr);
2615
2616 /*
2617 * KVM would not have got the exception if this entry was valid in the
2618 * shadow host TLB. Check the Guest TLB, if the entry is not there then
2619 * send the guest an exception. The guest exc handler should then inject
2620 * an entry into the guest TLB.
2621 */
2622 index = kvm_mips_guest_tlb_lookup(vcpu,
2623 (va & VPN2_MASK) |
2624 (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
2625 KVM_ENTRYHI_ASID));
2626 if (index < 0) {
2627 if (exccode == EXCCODE_TLBL) {
2628 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
2629 } else if (exccode == EXCCODE_TLBS) {
2630 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
2631 } else {
2632 kvm_err("%s: invalid exc code: %d\n", __func__,
2633 exccode);
2634 er = EMULATE_FAIL;
2635 }
2636 } else {
2637 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
2638
2639 /*
2640 * Check if the entry is valid, if not then setup a TLB invalid
2641 * exception to the guest
2642 */
2643 if (!TLB_IS_VALID(*tlb, va)) {
2644 if (exccode == EXCCODE_TLBL) {
2645 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
2646 vcpu);
2647 } else if (exccode == EXCCODE_TLBS) {
2648 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
2649 vcpu);
2650 } else {
2651 kvm_err("%s: invalid exc code: %d\n", __func__,
2652 exccode);
2653 er = EMULATE_FAIL;
2654 }
2655 } else {
2656 kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
2657 tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
2658 /*
2659 * OK we have a Guest TLB entry, now inject it into the
2660 * shadow host TLB
2661 */
2662 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
2663 }
2664 }
2665
2666 return er;
2667 }
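/*
 * In pseudo-code, the decision tree above is (sketch only):
 *
 *	idx = guest_tlb_lookup(badvaddr | guest_asid);
 *	if (idx < 0)
 *		deliver TLB miss (refill) to the guest;
 *	else if (!valid(guest_tlb[idx], badvaddr))
 *		deliver TLB invalid to the guest;
 *	else
 *		copy the guest entry into the shadow host TLB and retry;
 */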