/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

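/*
 * Translate a guest virtual address to a guest physical address. Only
 * unmapped segments are handled here: KSEG0/KSEG1 addresses are converted
 * with CPHYSADDR(); anything else is unexpected and yields KVM_INVALID_ADDR.
 */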
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;
	gva_t kseg = KSEGX(gva);

	if ((kseg == CKSEG0) || (kseg == CKSEG1))
		gpa = CPHYSADDR(gva);
	else {
		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
		kvm_mips_dump_host_tlbs();
		gpa = KVM_INVALID_ADDR;
	}

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}

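/*
 * Handle a Coprocessor Unusable exception from the guest. A CU1 (FPU) fault
 * either delivers a COP1 Unusable exception to the guest (no guest FPU, or
 * guest Status.CU1 clear) or restores the hardware FPU state; faults for any
 * other coprocessor go to the instruction emulator.
 */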
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/* FPU Unusable */
		if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
		    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
			/*
			 * Unusable/no FPU in guest:
			 * deliver guest COP1 Unusable Exception
			 */
			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
		} else {
			/* Restore FPU state */
			kvm_own_fpu(vcpu);
			er = EMULATE_DONE;
		}
	} else {
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	case EMULATE_WAIT:
		run->exit_reason = KVM_EXIT_INTR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

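/*
 * Handle a TLB Modified exception. Faults on user segment or guest KSEG2/3
 * addresses are passed to kvm_mips_handle_tlbmod(); a modify fault on guest
 * KSEG0 is unexpected, and anything else is reported to userland as an
 * internal error.
 */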
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);
		er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);

		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * XXXKYMA: The guest kernel does not expect to get this fault
		 * when we are not using HIGHMEM. Need to address this in a
		 * HIGHMEM kernel
		 */
		kvm_err("TLB MOD fault not handled, cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	} else {
		kvm_err("Illegal TLB Mod fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

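/*
 * Common handler for TLB load/store miss exceptions, with @store
 * distinguishing the two. Commpage faults in guest kernel mode, user segment
 * faults and guest KSEG0 faults each take their own path; any other address
 * is reported as an internal error.
 */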
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  store ? "ST" : "LD", cause, opc, badvaddr);

		/*
		 * User Address (UA) fault; this can happen if
		 * (1) the TLB entry is not present/valid in both the Guest and
		 *     shadow host TLBs, in which case we pass the fault on to
		 *     the guest kernel and let it handle it.
		 * (2) the TLB entry is present in the Guest TLB but not in the
		 *     shadow, in which case we inject the TLB from the Guest
		 *     TLB into the shadow host TLB.
		 */

		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault
		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Illegal TLB %s fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
			store ? "ST" : "LD", cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}

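/*
 * Handle an Address Error (store) exception. In guest kernel mode a store to
 * KSEG0/KSEG1 is treated as an MMIO access and emulated; anything else is an
 * internal error.
 */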
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KERNEL_MODE(vcpu)
	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		kvm_debug("Emulate Store to MMIO space\n");
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Store to MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

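/*
 * Handle an Address Error (load/fetch) exception. A load from KSEG0/KSEG1 is
 * treated as an MMIO access and emulated; anything else is an internal error.
 */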
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Load from MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
		/*
		 * No MSA in guest, or FPU enabled and not in FR=1 mode:
		 * deliver a guest reserved instruction exception
		 */
		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
		/* MSA disabled by guest, guest MSA disabled exception */
		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
	} else {
		/* Restore MSA/FPU state */
		kvm_own_msa(vcpu);
		er = EMULATE_DONE;
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
	return 0;
}

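/*
 * kscratch_enabled is a bitmap of the KScratch registers made available to
 * the guest; 0xfc (bits 2..7) enables CP0 Register 31, Selects 2 through 7.
 */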
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.kscratch_enabled = 0xfc;

	return 0;
}

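/*
 * Initialise the guest CP0 state to sane power-on defaults: a 24Kc-like
 * (or, on R6, generic QEMU) PRId, Config registers derived from the host,
 * the guest TLB size, IntCtl defaults and an Ebase carrying the VCPU id.
 */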
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 config, config1;
	int vcpu_id = vcpu->vcpu_id;

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected
	 */
#ifndef CONFIG_CPU_MIPSR6
	/* r2-r5, simulate a MIPS 24Kc */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
	/* r6+, simulate a generic QEMU machine */
	kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
	/*
	 * Have config1, Cacheable, noncoherent, write-back, write allocate.
	 * Endianness, arch revision & virtually tagged icache should match
	 * host.
	 */
	config = read_c0_config() & MIPS_CONF_AR;
	config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
	config |= CONF_BE;
#endif
	if (cpu_has_vtag_icache)
		config |= MIPS_CONF_VI;
	kvm_write_c0_guest_config(cop0, config);

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
		     MIPS_CONF1_WR | MIPS_CONF1_CA);
	kvm_write_c0_guest_config1(cop0, config1);

	/* Config2: M set (have Config3), no tertiary/secondary caches */
	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

	/* Config3: M set (have Config4), UserLocal supported */
	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

	/* Config4: M set (have Config5) */
	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

	/* Config5: M clear (no Config6) */
	kvm_write_c0_guest_config5(cop0, 0);

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/*
	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
				       (vcpu_id & MIPS_EBASE_CPUNUM));

	return 0;
}

static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
	return 0;
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
					  u64 __user *indices)
{
	return 0;
}

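/*
 * Read a register requiring trap & emulate specific handling: the virtualised
 * CP0_Count and the KVM timer control pseudo-registers. Any other register id
 * is rejected with -EINVAL.
 */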
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

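/*
 * Write a register requiring trap & emulate specific handling. Timer related
 * state (Count, Compare, Cause.DC) goes through the timer emulation helpers,
 * and writes to the Config registers are masked to the writable bits.
 */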
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		cur = kvm_read_c0_guest_config1(cop0);
		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config1(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		cur = kvm_read_c0_guest_config3(cop0);
		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config3(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		cur = kvm_read_c0_guest_config4(cop0);
		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config4(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		cur = kvm_read_c0_guest_config5(cop0);
		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config5(cop0, v);
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
{
	kvm_lose_fpu(vcpu);

	return 0;
}

static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu)
{
	return 0;
}

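/* The trap & emulate implementation of the kvm_mips_callbacks operations. */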
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,
	.handle_trap = kvm_trap_emul_handle_trap,
	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
	.handle_fpe = kvm_trap_emul_handle_fpe,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

	.vm_init = kvm_trap_emul_vm_init,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.num_regs = kvm_trap_emul_num_regs,
	.copy_reg_indices = kvm_trap_emul_copy_reg_indices,
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
	.vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
	.vcpu_set_regs = kvm_trap_emul_vcpu_set_regs,
};

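/* Hand the trap & emulate callback table back to the core KVM/MIPS code. */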
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}