MIPS: KVM: List FPU/MSA registers
[deliverable/linux.git] / arch / mips / kvm / mips.c
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: MIPS specific KVM APIs
7 *
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
d116e812 10 */
11
12#include <linux/errno.h>
13#include <linux/err.h>
98e91b84 14#include <linux/kdebug.h>
15#include <linux/module.h>
16#include <linux/vmalloc.h>
17#include <linux/fs.h>
18#include <linux/bootmem.h>
f798217d 19#include <asm/fpu.h>
20#include <asm/page.h>
21#include <asm/cacheflush.h>
22#include <asm/mmu_context.h>
c4c6f2ca 23#include <asm/pgtable.h>
24
25#include <linux/kvm_host.h>
26
27#include "interrupt.h"
28#include "commpage.h"
29
30#define CREATE_TRACE_POINTS
31#include "trace.h"
32
33#ifndef VECTORSPACING
34#define VECTORSPACING 0x100 /* for EI/VI mode */
35#endif
36
d116e812 37#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
669e846e 38struct kvm_stats_debugfs_item debugfs_entries[] = {
39 { "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
40 { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
41 { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
42 { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
43 { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
44 { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
45 { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
46 { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
47 { "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
48 { "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
49 { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
50 { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
51 { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
0a560427 52 { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
c2537ed9 53 { "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
1c0cd66a 54 { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
c2537ed9 55 { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
d116e812 56 { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
f7819512 57 { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
62bea5bf 58 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
3491caf2 59 { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
d116e812 60 { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
61 {NULL}
62};
63
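/* Clear the guest kernel/user ASIDs cached for every CPU so fresh ones get allocated. */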
64static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
65{
66 int i;
d116e812 67
68 for_each_possible_cpu(i) {
69 vcpu->arch.guest_kernel_asid[i] = 0;
70 vcpu->arch.guest_user_asid[i] = 0;
71 }
d116e812 72
73 return 0;
74}
75
76/*
 77 * XXXKYMA: We are simulating a processor that has the WII bit set in
 78 * Config7, so we are "runnable" if interrupts are pending
79 */
80int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
81{
82 return !!(vcpu->arch.pending_exceptions);
83}
84
85int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
86{
87 return 1;
88}
89
13a34e06 90int kvm_arch_hardware_enable(void)
91{
92 return 0;
93}
94
95int kvm_arch_hardware_setup(void)
96{
97 return 0;
98}
99
100void kvm_arch_check_processor_compat(void *rtn)
101{
d98403a5 102 *(int *)rtn = 0;
103}
104
105static void kvm_mips_init_tlbs(struct kvm *kvm)
106{
107 unsigned long wired;
108
109 /*
110 * Add a wired entry to the TLB, it is used to map the commpage to
111 * the Guest kernel
112 */
113 wired = read_c0_wired();
114 write_c0_wired(wired + 1);
115 mtc0_tlbw_hazard();
116 kvm->arch.commpage_tlb = wired;
117
118 kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
119 kvm->arch.commpage_tlb);
120}
121
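/* Per-CPU VM setup: reserve the commpage wired TLB entry and run the implementation's vm_init callback. */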
122static void kvm_mips_init_vm_percpu(void *arg)
123{
124 struct kvm *kvm = (struct kvm *)arg;
125
126 kvm_mips_init_tlbs(kvm);
127 kvm_mips_callbacks->vm_init(kvm);
128
129}
130
131int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
132{
133 if (atomic_inc_return(&kvm_mips_instance) == 1) {
134 kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
135 __func__);
136 on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
137 }
138
139 return 0;
140}
141
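/* Put the guest pmap pages and free every VCPU belonging to this VM. */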
142void kvm_mips_free_vcpus(struct kvm *kvm)
143{
144 unsigned int i;
145 struct kvm_vcpu *vcpu;
146
147 /* Put the pages we reserved for the guest pmap */
148 for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
149 if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
9befad23 150 kvm_release_pfn_clean(kvm->arch.guest_pmap[i]);
669e846e 151 }
c6c0a663 152 kfree(kvm->arch.guest_pmap);
153
154 kvm_for_each_vcpu(i, vcpu, kvm) {
155 kvm_arch_vcpu_free(vcpu);
156 }
157
158 mutex_lock(&kvm->lock);
159
160 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
161 kvm->vcpus[i] = NULL;
162
163 atomic_set(&kvm->online_vcpus, 0);
164
165 mutex_unlock(&kvm->lock);
166}
167
168static void kvm_mips_uninit_tlbs(void *arg)
169{
170 /* Restore wired count */
171 write_c0_wired(0);
172 mtc0_tlbw_hazard();
173 /* Clear out all the TLBs */
174 kvm_local_flush_tlb_all();
175}
176
177void kvm_arch_destroy_vm(struct kvm *kvm)
178{
179 kvm_mips_free_vcpus(kvm);
180
181 /* If this is the last instance, restore wired count */
182 if (atomic_dec_return(&kvm_mips_instance) == 0) {
183 kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
184 __func__);
185 on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
186 }
187}
188
189long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
190 unsigned long arg)
669e846e 191{
ed829857 192 return -ENOIOCTLCMD;
193}
194
195int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
196 unsigned long npages)
197{
198 return 0;
199}
200
201int kvm_arch_prepare_memory_region(struct kvm *kvm,
d116e812 202 struct kvm_memory_slot *memslot,
09170a49 203 const struct kvm_userspace_memory_region *mem,
d116e812 204 enum kvm_mr_change change)
205{
206 return 0;
207}
208
209void kvm_arch_commit_memory_region(struct kvm *kvm,
09170a49 210 const struct kvm_userspace_memory_region *mem,
d116e812 211 const struct kvm_memory_slot *old,
f36f3f28 212 const struct kvm_memory_slot *new,
d116e812 213 enum kvm_mr_change change)
214{
215 unsigned long npages = 0;
d98403a5 216 int i;
217
218 kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
219 __func__, kvm, mem->slot, mem->guest_phys_addr,
220 mem->memory_size, mem->userspace_addr);
221
222 /* Setup Guest PMAP table */
223 if (!kvm->arch.guest_pmap) {
224 if (mem->slot == 0)
225 npages = mem->memory_size >> PAGE_SHIFT;
226
227 if (npages) {
228 kvm->arch.guest_pmap_npages = npages;
229 kvm->arch.guest_pmap =
230 kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
231
232 if (!kvm->arch.guest_pmap) {
f7fdcb60 233 kvm_err("Failed to allocate guest PMAP\n");
d98403a5 234 return;
235 }
236
237 kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
238 npages, kvm->arch.guest_pmap);
239
240 /* Now setup the page table */
d116e812 241 for (i = 0; i < npages; i++)
669e846e 242 kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
243 }
244 }
245}
246
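/*
 * Allocate and initialise a VCPU: copy the guest exception handlers into a
 * freshly allocated gebase, set up the KSEG0 commpage and start the count timer.
 */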
247struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
248{
249 int err, size, offset;
250 void *gebase;
251 int i;
252
253 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
254
255 if (!vcpu) {
256 err = -ENOMEM;
257 goto out;
258 }
259
260 err = kvm_vcpu_init(vcpu, kvm, id);
261
262 if (err)
263 goto out_free_cpu;
264
6e95bfd2 265 kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
669e846e 266
267 /*
268 * Allocate space for host mode exception handlers that handle
269 * guest mode exits
270 */
d116e812 271 if (cpu_has_veic || cpu_has_vint)
669e846e 272 size = 0x200 + VECTORSPACING * 64;
d116e812 273 else
7006e2df 274 size = 0x4000;
669e846e 275
276 gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
277
278 if (!gebase) {
279 err = -ENOMEM;
585bb8f9 280 goto out_uninit_cpu;
669e846e 281 }
282 kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
283 ALIGN(size, PAGE_SIZE), gebase);
284
285 /* Save new ebase */
286 vcpu->arch.guest_ebase = gebase;
287
288 /* Copy L1 Guest Exception handler to correct offset */
289
290 /* TLB Refill, EXL = 0 */
291 memcpy(gebase, mips32_exception,
292 mips32_exceptionEnd - mips32_exception);
293
294 /* General Exception Entry point */
295 memcpy(gebase + 0x180, mips32_exception,
296 mips32_exceptionEnd - mips32_exception);
297
298 /* For vectored interrupts poke the exception code @ all offsets 0-7 */
299 for (i = 0; i < 8; i++) {
300 kvm_debug("L1 Vectored handler @ %p\n",
301 gebase + 0x200 + (i * VECTORSPACING));
302 memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
303 mips32_exceptionEnd - mips32_exception);
304 }
305
306 /* General handler, relocate to unmapped space for sanity's sake */
307 offset = 0x2000;
308 kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
309 gebase + offset,
310 mips32_GuestExceptionEnd - mips32_GuestException);
311
312 memcpy(gebase + offset, mips32_GuestException,
313 mips32_GuestExceptionEnd - mips32_GuestException);
314
315#ifdef MODULE
316 offset += mips32_GuestExceptionEnd - mips32_GuestException;
317 memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
318 __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
319 vcpu->arch.vcpu_run = gebase + offset;
320#else
321 vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
322#endif
323
669e846e 324 /* Invalidate the icache for these ranges */
325 local_flush_icache_range((unsigned long)gebase,
326 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
669e846e 327
328 /*
329 * Allocate comm page for guest kernel, a TLB will be reserved for
330 * mapping GVA @ 0xFFFF8000 to this page
331 */
332 vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
333
334 if (!vcpu->arch.kseg0_commpage) {
335 err = -ENOMEM;
336 goto out_free_gebase;
337 }
338
6e95bfd2 339 kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
340 kvm_mips_commpage_init(vcpu);
341
342 /* Init */
343 vcpu->arch.last_sched_cpu = -1;
344
345 /* Start off the timer */
e30492bb 346 kvm_mips_init_count(vcpu);
347
348 return vcpu;
349
350out_free_gebase:
351 kfree(gebase);
352
353out_uninit_cpu:
354 kvm_vcpu_uninit(vcpu);
355
356out_free_cpu:
357 kfree(vcpu);
358
359out:
360 return ERR_PTR(err);
361}
362
363void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
364{
365 hrtimer_cancel(&vcpu->arch.comparecount_timer);
366
367 kvm_vcpu_uninit(vcpu);
368
369 kvm_mips_dump_stats(vcpu);
370
371 kfree(vcpu->arch.guest_ebase);
372 kfree(vcpu->arch.kseg0_commpage);
8c9eb041 373 kfree(vcpu);
374}
375
376void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
377{
378 kvm_arch_vcpu_free(vcpu);
379}
380
381int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
382 struct kvm_guest_debug *dbg)
669e846e 383{
ed829857 384 return -ENOIOCTLCMD;
385}
386
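/*
 * KVM_RUN: deliver any pending guest interrupts, then enter the guest with
 * host interrupts disabled and the hardware page table walker stopped until
 * the next exit.
 */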
387int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
388{
389 int r = 0;
390 sigset_t sigsaved;
391
392 if (vcpu->sigset_active)
393 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
394
395 if (vcpu->mmio_needed) {
396 if (!vcpu->mmio_is_write)
397 kvm_mips_complete_mmio_load(vcpu, run);
398 vcpu->mmio_needed = 0;
399 }
400
401 lose_fpu(1);
402
044f0f03 403 local_irq_disable();
404 /* Check if we have any exceptions/interrupts pending */
405 kvm_mips_deliver_interrupts(vcpu,
406 kvm_read_c0_guest_cause(vcpu->arch.cop0));
407
ccf73aaf 408 __kvm_guest_enter();
669e846e 409
410 /* Disable hardware page table walking while in guest */
411 htw_stop();
412
93258604 413 trace_kvm_enter(vcpu);
797179bc 414 r = vcpu->arch.vcpu_run(run, vcpu);
93258604 415 trace_kvm_out(vcpu);
669e846e 416
417 /* Re-enable HTW before enabling interrupts */
418 htw_start();
419
ccf73aaf 420 __kvm_guest_exit();
421 local_irq_enable();
422
423 if (vcpu->sigset_active)
424 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
425
426 return r;
427}
428
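/*
 * KVM_INTERRUPT: a positive irq number (2-4) queues an I/O interrupt for the
 * target VCPU, a negative one dequeues it; the VCPU is woken if it is waiting.
 */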
429int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
430 struct kvm_mips_interrupt *irq)
431{
432 int intr = (int)irq->irq;
433 struct kvm_vcpu *dvcpu = NULL;
434
435 if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
436 kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
437 (int)intr);
438
439 if (irq->cpu == -1)
440 dvcpu = vcpu;
441 else
442 dvcpu = vcpu->kvm->vcpus[irq->cpu];
443
444 if (intr == 2 || intr == 3 || intr == 4) {
445 kvm_mips_callbacks->queue_io_int(dvcpu, irq);
446
447 } else if (intr == -2 || intr == -3 || intr == -4) {
448 kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
449 } else {
450 kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
451 irq->cpu, irq->irq);
452 return -EINVAL;
453 }
454
455 dvcpu->arch.wait = 0;
456
457 if (swait_active(&dvcpu->wq))
458 swake_up(&dvcpu->wq);
459
460 return 0;
461}
462
463int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
464 struct kvm_mp_state *mp_state)
669e846e 465{
ed829857 466 return -ENOIOCTLCMD;
467}
468
469int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
470 struct kvm_mp_state *mp_state)
669e846e 471{
ed829857 472 return -ENOIOCTLCMD;
473}
474
475static u64 kvm_mips_get_one_regs[] = {
476 KVM_REG_MIPS_R0,
477 KVM_REG_MIPS_R1,
478 KVM_REG_MIPS_R2,
479 KVM_REG_MIPS_R3,
480 KVM_REG_MIPS_R4,
481 KVM_REG_MIPS_R5,
482 KVM_REG_MIPS_R6,
483 KVM_REG_MIPS_R7,
484 KVM_REG_MIPS_R8,
485 KVM_REG_MIPS_R9,
486 KVM_REG_MIPS_R10,
487 KVM_REG_MIPS_R11,
488 KVM_REG_MIPS_R12,
489 KVM_REG_MIPS_R13,
490 KVM_REG_MIPS_R14,
491 KVM_REG_MIPS_R15,
492 KVM_REG_MIPS_R16,
493 KVM_REG_MIPS_R17,
494 KVM_REG_MIPS_R18,
495 KVM_REG_MIPS_R19,
496 KVM_REG_MIPS_R20,
497 KVM_REG_MIPS_R21,
498 KVM_REG_MIPS_R22,
499 KVM_REG_MIPS_R23,
500 KVM_REG_MIPS_R24,
501 KVM_REG_MIPS_R25,
502 KVM_REG_MIPS_R26,
503 KVM_REG_MIPS_R27,
504 KVM_REG_MIPS_R28,
505 KVM_REG_MIPS_R29,
506 KVM_REG_MIPS_R30,
507 KVM_REG_MIPS_R31,
508
509 KVM_REG_MIPS_HI,
510 KVM_REG_MIPS_LO,
511 KVM_REG_MIPS_PC,
512
513 KVM_REG_MIPS_CP0_INDEX,
514 KVM_REG_MIPS_CP0_CONTEXT,
7767b7d2 515 KVM_REG_MIPS_CP0_USERLOCAL,
516 KVM_REG_MIPS_CP0_PAGEMASK,
517 KVM_REG_MIPS_CP0_WIRED,
16fd5c1d 518 KVM_REG_MIPS_CP0_HWRENA,
4c73fb2b 519 KVM_REG_MIPS_CP0_BADVADDR,
f8be02da 520 KVM_REG_MIPS_CP0_COUNT,
4c73fb2b 521 KVM_REG_MIPS_CP0_ENTRYHI,
f8be02da 522 KVM_REG_MIPS_CP0_COMPARE,
523 KVM_REG_MIPS_CP0_STATUS,
524 KVM_REG_MIPS_CP0_CAUSE,
fb6df0cd 525 KVM_REG_MIPS_CP0_EPC,
1068eaaf 526 KVM_REG_MIPS_CP0_PRID,
527 KVM_REG_MIPS_CP0_CONFIG,
528 KVM_REG_MIPS_CP0_CONFIG1,
529 KVM_REG_MIPS_CP0_CONFIG2,
530 KVM_REG_MIPS_CP0_CONFIG3,
531 KVM_REG_MIPS_CP0_CONFIG4,
532 KVM_REG_MIPS_CP0_CONFIG5,
4c73fb2b 533 KVM_REG_MIPS_CP0_CONFIG7,
534 KVM_REG_MIPS_CP0_ERROREPC,
535
536 KVM_REG_MIPS_COUNT_CTL,
537 KVM_REG_MIPS_COUNT_RESUME,
f74a8e22 538 KVM_REG_MIPS_COUNT_HZ,
539};
540
541static u64 kvm_mips_get_one_regs_fpu[] = {
542 KVM_REG_MIPS_FCR_IR,
543 KVM_REG_MIPS_FCR_CSR,
544};
545
546static u64 kvm_mips_get_one_regs_msa[] = {
547 KVM_REG_MIPS_MSA_IR,
548 KVM_REG_MIPS_MSA_CSR,
549};
550
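/* Total number of registers exposed via KVM_GET_REG_LIST, including the conditional FPU/MSA ones. */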
551static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
552{
553 unsigned long ret;
554
555 ret = ARRAY_SIZE(kvm_mips_get_one_regs);
556 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
557 ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
558 /* odd doubles */
559 if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
560 ret += 16;
561 }
562 if (kvm_mips_guest_can_have_msa(&vcpu->arch))
563 ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
564 ret += kvm_mips_callbacks->num_regs(vcpu);
565
566 return ret;
567}
568
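/* Copy the full ONE_REG index list to userspace; must stay in sync with kvm_mips_num_regs(). */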
569static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
570{
571 u64 index;
572 unsigned int i;
573
574 if (copy_to_user(indices, kvm_mips_get_one_regs,
575 sizeof(kvm_mips_get_one_regs)))
576 return -EFAULT;
577 indices += ARRAY_SIZE(kvm_mips_get_one_regs);
578
579 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
580 if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
581 sizeof(kvm_mips_get_one_regs_fpu)))
582 return -EFAULT;
583 indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);
584
585 for (i = 0; i < 32; ++i) {
586 index = KVM_REG_MIPS_FPR_32(i);
587 if (copy_to_user(indices, &index, sizeof(index)))
588 return -EFAULT;
589 ++indices;
590
591 /* skip odd doubles if no F64 */
592 if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
593 continue;
594
595 index = KVM_REG_MIPS_FPR_64(i);
596 if (copy_to_user(indices, &index, sizeof(index)))
597 return -EFAULT;
598 ++indices;
599 }
600 }
601
602 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
603 if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
604 sizeof(kvm_mips_get_one_regs_msa)))
605 return -EFAULT;
606 indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);
607
608 for (i = 0; i < 32; ++i) {
609 index = KVM_REG_MIPS_VEC_128(i);
610 if (copy_to_user(indices, &index, sizeof(index)))
611 return -EFAULT;
612 ++indices;
613 }
614 }
615
616 return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
617}
618
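/* KVM_GET_ONE_REG: read a single register and copy it out in the size encoded in reg->id. */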
619static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
620 const struct kvm_one_reg *reg)
621{
4c73fb2b 622 struct mips_coproc *cop0 = vcpu->arch.cop0;
379245cd 623 struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
f8be02da 624 int ret;
4c73fb2b 625 s64 v;
ab86bd60 626 s64 vs[2];
379245cd 627 unsigned int idx;
628
629 switch (reg->id) {
379245cd 630 /* General purpose registers */
631 case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
632 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
633 break;
634 case KVM_REG_MIPS_HI:
635 v = (long)vcpu->arch.hi;
636 break;
637 case KVM_REG_MIPS_LO:
638 v = (long)vcpu->arch.lo;
639 break;
640 case KVM_REG_MIPS_PC:
641 v = (long)vcpu->arch.pc;
642 break;
643
644 /* Floating point registers */
645 case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
646 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
647 return -EINVAL;
648 idx = reg->id - KVM_REG_MIPS_FPR_32(0);
649 /* Odd singles in top of even double when FR=0 */
650 if (kvm_read_c0_guest_status(cop0) & ST0_FR)
651 v = get_fpr32(&fpu->fpr[idx], 0);
652 else
653 v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
654 break;
655 case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
656 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
657 return -EINVAL;
658 idx = reg->id - KVM_REG_MIPS_FPR_64(0);
659 /* Can't access odd doubles in FR=0 mode */
660 if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
661 return -EINVAL;
662 v = get_fpr64(&fpu->fpr[idx], 0);
663 break;
664 case KVM_REG_MIPS_FCR_IR:
665 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
666 return -EINVAL;
667 v = boot_cpu_data.fpu_id;
668 break;
669 case KVM_REG_MIPS_FCR_CSR:
670 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
671 return -EINVAL;
672 v = fpu->fcr31;
673 break;
674
675 /* MIPS SIMD Architecture (MSA) registers */
676 case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
677 if (!kvm_mips_guest_has_msa(&vcpu->arch))
678 return -EINVAL;
679 /* Can't access MSA registers in FR=0 mode */
680 if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
681 return -EINVAL;
682 idx = reg->id - KVM_REG_MIPS_VEC_128(0);
683#ifdef CONFIG_CPU_LITTLE_ENDIAN
684 /* least significant byte first */
685 vs[0] = get_fpr64(&fpu->fpr[idx], 0);
686 vs[1] = get_fpr64(&fpu->fpr[idx], 1);
687#else
688 /* most significant byte first */
689 vs[0] = get_fpr64(&fpu->fpr[idx], 1);
690 vs[1] = get_fpr64(&fpu->fpr[idx], 0);
691#endif
692 break;
693 case KVM_REG_MIPS_MSA_IR:
694 if (!kvm_mips_guest_has_msa(&vcpu->arch))
695 return -EINVAL;
696 v = boot_cpu_data.msa_id;
697 break;
698 case KVM_REG_MIPS_MSA_CSR:
699 if (!kvm_mips_guest_has_msa(&vcpu->arch))
700 return -EINVAL;
701 v = fpu->msacsr;
702 break;
703
379245cd 704 /* Co-processor 0 registers */
705 case KVM_REG_MIPS_CP0_INDEX:
706 v = (long)kvm_read_c0_guest_index(cop0);
707 break;
708 case KVM_REG_MIPS_CP0_CONTEXT:
709 v = (long)kvm_read_c0_guest_context(cop0);
710 break;
711 case KVM_REG_MIPS_CP0_USERLOCAL:
712 v = (long)kvm_read_c0_guest_userlocal(cop0);
713 break;
714 case KVM_REG_MIPS_CP0_PAGEMASK:
715 v = (long)kvm_read_c0_guest_pagemask(cop0);
716 break;
717 case KVM_REG_MIPS_CP0_WIRED:
718 v = (long)kvm_read_c0_guest_wired(cop0);
719 break;
720 case KVM_REG_MIPS_CP0_HWRENA:
721 v = (long)kvm_read_c0_guest_hwrena(cop0);
722 break;
723 case KVM_REG_MIPS_CP0_BADVADDR:
724 v = (long)kvm_read_c0_guest_badvaddr(cop0);
725 break;
726 case KVM_REG_MIPS_CP0_ENTRYHI:
727 v = (long)kvm_read_c0_guest_entryhi(cop0);
728 break;
729 case KVM_REG_MIPS_CP0_COMPARE:
730 v = (long)kvm_read_c0_guest_compare(cop0);
731 break;
732 case KVM_REG_MIPS_CP0_STATUS:
733 v = (long)kvm_read_c0_guest_status(cop0);
734 break;
735 case KVM_REG_MIPS_CP0_CAUSE:
736 v = (long)kvm_read_c0_guest_cause(cop0);
737 break;
738 case KVM_REG_MIPS_CP0_EPC:
739 v = (long)kvm_read_c0_guest_epc(cop0);
740 break;
741 case KVM_REG_MIPS_CP0_PRID:
742 v = (long)kvm_read_c0_guest_prid(cop0);
743 break;
744 case KVM_REG_MIPS_CP0_CONFIG:
745 v = (long)kvm_read_c0_guest_config(cop0);
746 break;
747 case KVM_REG_MIPS_CP0_CONFIG1:
748 v = (long)kvm_read_c0_guest_config1(cop0);
749 break;
750 case KVM_REG_MIPS_CP0_CONFIG2:
751 v = (long)kvm_read_c0_guest_config2(cop0);
752 break;
753 case KVM_REG_MIPS_CP0_CONFIG3:
754 v = (long)kvm_read_c0_guest_config3(cop0);
755 break;
756 case KVM_REG_MIPS_CP0_CONFIG4:
757 v = (long)kvm_read_c0_guest_config4(cop0);
758 break;
759 case KVM_REG_MIPS_CP0_CONFIG5:
760 v = (long)kvm_read_c0_guest_config5(cop0);
761 break;
762 case KVM_REG_MIPS_CP0_CONFIG7:
763 v = (long)kvm_read_c0_guest_config7(cop0);
764 break;
765 case KVM_REG_MIPS_CP0_ERROREPC:
766 v = (long)kvm_read_c0_guest_errorepc(cop0);
767 break;
f8be02da 768 /* registers to be handled specially */
cc68d22f 769 default:
770 ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
771 if (ret)
772 return ret;
773 break;
4c73fb2b 774 }
775 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
776 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
d116e812 777
778 return put_user(v, uaddr64);
779 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
780 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
781 u32 v32 = (u32)v;
d116e812 782
681865d4 783 return put_user(v32, uaddr32);
784 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
785 void __user *uaddr = (void __user *)(long)reg->addr;
786
0178fd7d 787 return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
788 } else {
789 return -EINVAL;
790 }
791}
792
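/* KVM_SET_ONE_REG: fetch the value from userspace in the size encoded in reg->id, then write the register. */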
793static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
794 const struct kvm_one_reg *reg)
795{
4c73fb2b 796 struct mips_coproc *cop0 = vcpu->arch.cop0;
797 struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
798 s64 v;
ab86bd60 799 s64 vs[2];
379245cd 800 unsigned int idx;
4c73fb2b 801
802 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
803 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
804
805 if (get_user(v, uaddr64) != 0)
806 return -EFAULT;
807 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
808 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
809 s32 v32;
810
811 if (get_user(v32, uaddr32) != 0)
812 return -EFAULT;
813 v = (s64)v32;
814 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
815 void __user *uaddr = (void __user *)(long)reg->addr;
816
0178fd7d 817 return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
818 } else {
819 return -EINVAL;
820 }
821
822 switch (reg->id) {
379245cd 823 /* General purpose registers */
824 case KVM_REG_MIPS_R0:
825 /* Silently ignore requests to set $0 */
826 break;
827 case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
828 vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
829 break;
830 case KVM_REG_MIPS_HI:
831 vcpu->arch.hi = v;
832 break;
833 case KVM_REG_MIPS_LO:
834 vcpu->arch.lo = v;
835 break;
836 case KVM_REG_MIPS_PC:
837 vcpu->arch.pc = v;
838 break;
839
840 /* Floating point registers */
841 case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
842 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
843 return -EINVAL;
844 idx = reg->id - KVM_REG_MIPS_FPR_32(0);
845 /* Odd singles in top of even double when FR=0 */
846 if (kvm_read_c0_guest_status(cop0) & ST0_FR)
847 set_fpr32(&fpu->fpr[idx], 0, v);
848 else
849 set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
850 break;
851 case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
852 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
853 return -EINVAL;
854 idx = reg->id - KVM_REG_MIPS_FPR_64(0);
855 /* Can't access odd doubles in FR=0 mode */
856 if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
857 return -EINVAL;
858 set_fpr64(&fpu->fpr[idx], 0, v);
859 break;
860 case KVM_REG_MIPS_FCR_IR:
861 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
862 return -EINVAL;
863 /* Read-only */
864 break;
865 case KVM_REG_MIPS_FCR_CSR:
866 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
867 return -EINVAL;
868 fpu->fcr31 = v;
869 break;
870
871 /* MIPS SIMD Architecture (MSA) registers */
872 case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
873 if (!kvm_mips_guest_has_msa(&vcpu->arch))
874 return -EINVAL;
875 idx = reg->id - KVM_REG_MIPS_VEC_128(0);
876#ifdef CONFIG_CPU_LITTLE_ENDIAN
877 /* least significant byte first */
878 set_fpr64(&fpu->fpr[idx], 0, vs[0]);
879 set_fpr64(&fpu->fpr[idx], 1, vs[1]);
880#else
881 /* most significant byte first */
882 set_fpr64(&fpu->fpr[idx], 1, vs[0]);
883 set_fpr64(&fpu->fpr[idx], 0, vs[1]);
884#endif
885 break;
886 case KVM_REG_MIPS_MSA_IR:
887 if (!kvm_mips_guest_has_msa(&vcpu->arch))
888 return -EINVAL;
889 /* Read-only */
890 break;
891 case KVM_REG_MIPS_MSA_CSR:
892 if (!kvm_mips_guest_has_msa(&vcpu->arch))
893 return -EINVAL;
894 fpu->msacsr = v;
895 break;
896
379245cd 897 /* Co-processor 0 registers */
898 case KVM_REG_MIPS_CP0_INDEX:
899 kvm_write_c0_guest_index(cop0, v);
900 break;
901 case KVM_REG_MIPS_CP0_CONTEXT:
902 kvm_write_c0_guest_context(cop0, v);
903 break;
904 case KVM_REG_MIPS_CP0_USERLOCAL:
905 kvm_write_c0_guest_userlocal(cop0, v);
906 break;
907 case KVM_REG_MIPS_CP0_PAGEMASK:
908 kvm_write_c0_guest_pagemask(cop0, v);
909 break;
910 case KVM_REG_MIPS_CP0_WIRED:
911 kvm_write_c0_guest_wired(cop0, v);
912 break;
913 case KVM_REG_MIPS_CP0_HWRENA:
914 kvm_write_c0_guest_hwrena(cop0, v);
915 break;
916 case KVM_REG_MIPS_CP0_BADVADDR:
917 kvm_write_c0_guest_badvaddr(cop0, v);
918 break;
919 case KVM_REG_MIPS_CP0_ENTRYHI:
920 kvm_write_c0_guest_entryhi(cop0, v);
921 break;
922 case KVM_REG_MIPS_CP0_STATUS:
923 kvm_write_c0_guest_status(cop0, v);
924 break;
925 case KVM_REG_MIPS_CP0_EPC:
926 kvm_write_c0_guest_epc(cop0, v);
927 break;
928 case KVM_REG_MIPS_CP0_PRID:
929 kvm_write_c0_guest_prid(cop0, v);
930 break;
931 case KVM_REG_MIPS_CP0_ERROREPC:
932 kvm_write_c0_guest_errorepc(cop0, v);
933 break;
f8be02da 934 /* registers to be handled specially */
4c73fb2b 935 default:
cc68d22f 936 return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
937 }
938 return 0;
939}
940
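/* KVM_ENABLE_CAP: enable optional per-VCPU features (currently FPU and MSA). */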
941static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
942 struct kvm_enable_cap *cap)
943{
944 int r = 0;
945
946 if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
947 return -EINVAL;
948 if (cap->flags)
949 return -EINVAL;
950 if (cap->args[0])
951 return -EINVAL;
952
953 switch (cap->cap) {
954 case KVM_CAP_MIPS_FPU:
955 vcpu->arch.fpu_enabled = true;
956 break;
957 case KVM_CAP_MIPS_MSA:
958 vcpu->arch.msa_enabled = true;
959 break;
960 default:
961 r = -EINVAL;
962 break;
963 }
964
965 return r;
966}
967
968long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
969 unsigned long arg)
970{
971 struct kvm_vcpu *vcpu = filp->private_data;
972 void __user *argp = (void __user *)arg;
973 long r;
974
975 switch (ioctl) {
976 case KVM_SET_ONE_REG:
977 case KVM_GET_ONE_REG: {
978 struct kvm_one_reg reg;
d116e812 979
980 if (copy_from_user(&reg, argp, sizeof(reg)))
981 return -EFAULT;
982 if (ioctl == KVM_SET_ONE_REG)
983 return kvm_mips_set_reg(vcpu, &reg);
984 else
985 return kvm_mips_get_reg(vcpu, &reg);
986 }
987 case KVM_GET_REG_LIST: {
988 struct kvm_reg_list __user *user_list = argp;
989 struct kvm_reg_list reg_list;
990 unsigned n;
991
992 if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
993 return -EFAULT;
994 n = reg_list.n;
f5c43bd4 995 reg_list.n = kvm_mips_num_regs(vcpu);
996 if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
997 return -EFAULT;
998 if (n < reg_list.n)
999 return -E2BIG;
f5c43bd4 1000 return kvm_mips_copy_reg_indices(vcpu, user_list->reg);
4c73fb2b 1001 }
1002 case KVM_NMI:
1003 /* Treat the NMI as a CPU reset */
1004 r = kvm_mips_reset_vcpu(vcpu);
1005 break;
1006 case KVM_INTERRUPT:
1007 {
1008 struct kvm_mips_interrupt irq;
d116e812 1009
1010 r = -EFAULT;
1011 if (copy_from_user(&irq, argp, sizeof(irq)))
1012 goto out;
1013
1014 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
1015 irq.irq);
1016
1017 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1018 break;
1019 }
1020 case KVM_ENABLE_CAP: {
1021 struct kvm_enable_cap cap;
1022
1023 r = -EFAULT;
1024 if (copy_from_user(&cap, argp, sizeof(cap)))
1025 goto out;
1026 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1027 break;
1028 }
669e846e 1029 default:
4c73fb2b 1030 r = -ENOIOCTLCMD;
1031 }
1032
1033out:
1034 return r;
1035}
1036
d116e812 1037/* Get (and clear) the dirty memory log for a memory slot. */
1038int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
1039{
9f6b8029 1040 struct kvm_memslots *slots;
1041 struct kvm_memory_slot *memslot;
1042 unsigned long ga, ga_end;
1043 int is_dirty = 0;
1044 int r;
1045 unsigned long n;
1046
1047 mutex_lock(&kvm->slots_lock);
1048
1049 r = kvm_get_dirty_log(kvm, log, &is_dirty);
1050 if (r)
1051 goto out;
1052
1053 /* If nothing is dirty, don't bother messing with page tables. */
1054 if (is_dirty) {
1055 slots = kvm_memslots(kvm);
1056 memslot = id_to_memslot(slots, log->slot);
1057
1058 ga = memslot->base_gfn << PAGE_SHIFT;
1059 ga_end = ga + (memslot->npages << PAGE_SHIFT);
1060
1061 kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
1062 ga_end);
1063
1064 n = kvm_dirty_bitmap_bytes(memslot);
1065 memset(memslot->dirty_bitmap, 0, n);
1066 }
1067
1068 r = 0;
1069out:
1070 mutex_unlock(&kvm->slots_lock);
1071 return r;
1072
1073}
1074
1075long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
1076{
1077 long r;
1078
1079 switch (ioctl) {
1080 default:
ed829857 1081 r = -ENOIOCTLCMD;
1082 }
1083
1084 return r;
1085}
1086
1087int kvm_arch_init(void *opaque)
1088{
1089 if (kvm_mips_callbacks) {
1090 kvm_err("kvm: module already exists\n");
1091 return -EEXIST;
1092 }
1093
d98403a5 1094 return kvm_mips_emulation_init(&kvm_mips_callbacks);
1095}
1096
1097void kvm_arch_exit(void)
1098{
1099 kvm_mips_callbacks = NULL;
1100}
1101
1102int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1103 struct kvm_sregs *sregs)
669e846e 1104{
ed829857 1105 return -ENOIOCTLCMD;
1106}
1107
1108int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1109 struct kvm_sregs *sregs)
669e846e 1110{
ed829857 1111 return -ENOIOCTLCMD;
1112}
1113
31928aa5 1114void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
669e846e 1115{
1116}
1117
1118int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1119{
ed829857 1120 return -ENOIOCTLCMD;
1121}
1122
1123int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1124{
ed829857 1125 return -ENOIOCTLCMD;
1126}
1127
1128int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1129{
1130 return VM_FAULT_SIGBUS;
1131}
1132
784aa3d7 1133int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
1134{
1135 int r;
1136
1137 switch (ext) {
4c73fb2b 1138 case KVM_CAP_ONE_REG:
5fafd874 1139 case KVM_CAP_ENABLE_CAP:
1140 r = 1;
1141 break;
1142 case KVM_CAP_COALESCED_MMIO:
1143 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
1144 break;
5fafd874 1145 case KVM_CAP_MIPS_FPU:
1146 /* We don't handle systems with inconsistent cpu_has_fpu */
1147 r = !!raw_cpu_has_fpu;
5fafd874 1148 break;
1149 case KVM_CAP_MIPS_MSA:
1150 /*
1151 * We don't support MSA vector partitioning yet:
1152 * 1) It would require explicit support which can't be tested
1153 * yet due to lack of support in current hardware.
1154 * 2) It extends the state that would need to be saved/restored
1155 * by e.g. QEMU for migration.
1156 *
1157 * When vector partitioning hardware becomes available, support
1158 * could be added by requiring a flag when enabling
1159 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
1160 * to save/restore the appropriate extra state.
1161 */
1162 r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
1163 break;
1164 default:
1165 r = 0;
1166 break;
1167 }
1168 return r;
1169}
1170
1171int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
1172{
1173 return kvm_mips_pending_timer(vcpu);
1174}
1175
1176int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
1177{
1178 int i;
1179 struct mips_coproc *cop0;
1180
1181 if (!vcpu)
1182 return -1;
1183
1184 kvm_debug("VCPU Register Dump:\n");
1185 kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
1186 kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
1187
1188 for (i = 0; i < 32; i += 4) {
6ad78a5c 1189 kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
1190 vcpu->arch.gprs[i],
1191 vcpu->arch.gprs[i + 1],
1192 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
1193 }
1194 kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
1195 kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
1196
1197 cop0 = vcpu->arch.cop0;
1198 kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
1199 kvm_read_c0_guest_status(cop0),
1200 kvm_read_c0_guest_cause(cop0));
669e846e 1201
6ad78a5c 1202 kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
1203
1204 return 0;
1205}
1206
1207int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1208{
1209 int i;
1210
8d17dd04 1211 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
bf32ebf6 1212 vcpu->arch.gprs[i] = regs->gpr[i];
8d17dd04 1213 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
1214 vcpu->arch.hi = regs->hi;
1215 vcpu->arch.lo = regs->lo;
1216 vcpu->arch.pc = regs->pc;
1217
4c73fb2b 1218 return 0;
1219}
1220
1221int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1222{
1223 int i;
1224
8d17dd04 1225 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
bf32ebf6 1226 regs->gpr[i] = vcpu->arch.gprs[i];
1227
1228 regs->hi = vcpu->arch.hi;
1229 regs->lo = vcpu->arch.lo;
1230 regs->pc = vcpu->arch.pc;
1231
4c73fb2b 1232 return 0;
1233}
1234
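/* hrtimer payload: queue a timer interrupt for the VCPU and wake it if it is waiting. */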
0fae34f4 1235static void kvm_mips_comparecount_func(unsigned long data)
1236{
1237 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
1238
1239 kvm_mips_callbacks->queue_timer_int(vcpu);
1240
1241 vcpu->arch.wait = 0;
1242 if (swait_active(&vcpu->wq))
1243 swake_up(&vcpu->wq);
1244}
1245
d116e812 1246/* low level hrtimer wake routine */
0fae34f4 1247static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
1248{
1249 struct kvm_vcpu *vcpu;
1250
1251 vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
1252 kvm_mips_comparecount_func((unsigned long) vcpu);
e30492bb 1253 return kvm_mips_count_timeout(vcpu);
1254}
1255
1256int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1257{
1258 kvm_mips_callbacks->vcpu_init(vcpu);
1259 hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
1260 HRTIMER_MODE_REL);
1261 vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
1262 return 0;
1263}
1264
1265int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1266 struct kvm_translation *tr)
1267{
1268 return 0;
1269}
1270
1271/* Initial guest state */
1272int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1273{
1274 return kvm_mips_callbacks->vcpu_setup(vcpu);
1275}
1276
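/* Enable the DSP extension (Status.MX) for the host if the CPU supports it. */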
d116e812 1277static void kvm_mips_set_c0_status(void)
669e846e 1278{
8cffd197 1279 u32 status = read_c0_status();
669e846e 1280
1281 if (cpu_has_dsp)
1282 status |= (ST0_MX);
1283
1284 write_c0_status(status);
1285 ehb();
1286}
1287
1288/*
1289 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
1290 */
1291int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
1292{
1293 u32 cause = vcpu->arch.host_cp0_cause;
1294 u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1295 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
1296 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
1297 enum emulation_result er = EMULATE_DONE;
1298 int ret = RESUME_GUEST;
1299
1300 /* re-enable HTW before enabling interrupts */
1301 htw_start();
1302
1303 /* Set a default exit reason */
1304 run->exit_reason = KVM_EXIT_UNKNOWN;
1305 run->ready_for_interrupt_injection = 1;
1306
1307 /*
1308 * Set the appropriate status bits based on host CPU features,
1309 * before we hit the scheduler
1310 */
1311 kvm_mips_set_c0_status();
1312
1313 local_irq_enable();
1314
1315 kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
1316 cause, opc, run, vcpu);
1e09e86a 1317 trace_kvm_exit(vcpu, exccode);
669e846e 1318
1319 /*
1320 * Do a privilege check, if in UM most of these exit conditions end up
1321 * causing an exception to be delivered to the Guest Kernel
1322 */
1323 er = kvm_mips_check_privilege(cause, opc, run, vcpu);
1324 if (er == EMULATE_PRIV_FAIL) {
1325 goto skip_emul;
1326 } else if (er == EMULATE_FAIL) {
1327 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1328 ret = RESUME_HOST;
1329 goto skip_emul;
1330 }
1331
1332 switch (exccode) {
1333 case EXCCODE_INT:
1334 kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
1335
1336 ++vcpu->stat.int_exits;
669e846e 1337
d116e812 1338 if (need_resched())
669e846e 1339 cond_resched();
1340
1341 ret = RESUME_GUEST;
1342 break;
1343
1344 case EXCCODE_CPU:
1345 kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);
1346
1347 ++vcpu->stat.cop_unusable_exits;
1348 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
1349 /* XXXKYMA: Might need to return to user space */
d116e812 1350 if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
669e846e 1351 ret = RESUME_HOST;
1352 break;
1353
16d100db 1354 case EXCCODE_MOD:
669e846e 1355 ++vcpu->stat.tlbmod_exits;
1356 ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
1357 break;
1358
16d100db 1359 case EXCCODE_TLBS:
1360 kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
1361 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
1362 badvaddr);
1363
1364 ++vcpu->stat.tlbmiss_st_exits;
1365 ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
1366 break;
1367
16d100db 1368 case EXCCODE_TLBL:
1369 kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
1370 cause, opc, badvaddr);
1371
1372 ++vcpu->stat.tlbmiss_ld_exits;
1373 ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
1374 break;
1375
16d100db 1376 case EXCCODE_ADES:
669e846e 1377 ++vcpu->stat.addrerr_st_exits;
1378 ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
1379 break;
1380
16d100db 1381 case EXCCODE_ADEL:
669e846e 1382 ++vcpu->stat.addrerr_ld_exits;
1383 ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
1384 break;
1385
16d100db 1386 case EXCCODE_SYS:
669e846e 1387 ++vcpu->stat.syscall_exits;
1388 ret = kvm_mips_callbacks->handle_syscall(vcpu);
1389 break;
1390
16d100db 1391 case EXCCODE_RI:
669e846e 1392 ++vcpu->stat.resvd_inst_exits;
1393 ret = kvm_mips_callbacks->handle_res_inst(vcpu);
1394 break;
1395
16d100db 1396 case EXCCODE_BP:
669e846e 1397 ++vcpu->stat.break_inst_exits;
1398 ret = kvm_mips_callbacks->handle_break(vcpu);
1399 break;
1400
16d100db 1401 case EXCCODE_TR:
0a560427 1402 ++vcpu->stat.trap_inst_exits;
1403 ret = kvm_mips_callbacks->handle_trap(vcpu);
1404 break;
1405
16d100db 1406 case EXCCODE_MSAFPE:
c2537ed9 1407 ++vcpu->stat.msa_fpe_exits;
1408 ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
1409 break;
1410
16d100db 1411 case EXCCODE_FPE:
1c0cd66a 1412 ++vcpu->stat.fpe_exits;
1413 ret = kvm_mips_callbacks->handle_fpe(vcpu);
1414 break;
1415
16d100db 1416 case EXCCODE_MSADIS:
c2537ed9 1417 ++vcpu->stat.msa_disabled_exits;
1418 ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
1419 break;
1420
669e846e 1421 default:
1422 kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
1423 exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
1424 kvm_read_c0_guest_status(vcpu->arch.cop0));
1425 kvm_arch_vcpu_dump_regs(vcpu);
1426 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1427 ret = RESUME_HOST;
1428 break;
1429
1430 }
1431
1432skip_emul:
1433 local_irq_disable();
1434
1435 if (er == EMULATE_DONE && !(ret & RESUME_HOST))
1436 kvm_mips_deliver_interrupts(vcpu, cause);
1437
1438 if (!(ret & RESUME_HOST)) {
d116e812 1439 /* Only check for signals if not already exiting to userspace */
1440 if (signal_pending(current)) {
1441 run->exit_reason = KVM_EXIT_INTR;
1442 ret = (-EINTR << 2) | RESUME_HOST;
1443 ++vcpu->stat.signal_exits;
1e09e86a 1444 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
1445 }
1446 }
1447
98e91b84 1448 if (ret == RESUME_GUEST) {
1449 trace_kvm_reenter(vcpu);
1450
98e91b84 1451 /*
1452 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
1453 * is live), restore FCR31 / MSACSR.
1454 *
1455 * This should be before returning to the guest exception
1456 * vector, as it may well cause an [MSA] FP exception if there
1457 * are pending exception bits unmasked. (see
1458 * kvm_mips_csr_die_notifier() for how that is handled).
1459 */
1460 if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
1461 read_c0_status() & ST0_CU1)
1462 __kvm_restore_fcsr(&vcpu->arch);
1463
1464 if (kvm_mips_guest_has_msa(&vcpu->arch) &&
1465 read_c0_config5() & MIPS_CONF5_MSAEN)
1466 __kvm_restore_msacsr(&vcpu->arch);
1467 }
1468
1469 /* Disable HTW before returning to guest or host */
1470 htw_stop();
1471
1472 return ret;
1473}
1474
1475/* Enable FPU for guest and restore context */
1476void kvm_own_fpu(struct kvm_vcpu *vcpu)
1477{
1478 struct mips_coproc *cop0 = vcpu->arch.cop0;
1479 unsigned int sr, cfg5;
1480
1481 preempt_disable();
1482
539cb89f
JH
1483 sr = kvm_read_c0_guest_status(cop0);
1484
1485 /*
1486 * If MSA state is already live, it is undefined how it interacts with
1487 * FR=0 FPU state, and we don't want to hit reserved instruction
1488 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
1489 * play it safe and save it first.
1490 *
1491 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
1492 * get called when guest CU1 is set, however we can't trust the guest
1493 * not to clobber the status register directly via the commpage.
1494 */
1495 if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
f943176a 1496 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1497 kvm_lose_fpu(vcpu);
1498
1499 /*
1500 * Enable FPU for guest
1501 * We set FR and FRE according to guest context
1502 */
1503 change_c0_status(ST0_CU1 | ST0_FR, sr);
1504 if (cpu_has_fre) {
1505 cfg5 = kvm_read_c0_guest_config5(cop0);
1506 change_c0_config5(MIPS_CONF5_FRE, cfg5);
1507 }
1508 enable_fpu_hazard();
1509
1510 /* If guest FPU state not active, restore it now */
f943176a 1511 if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
98e91b84 1512 __kvm_restore_fpu(&vcpu->arch);
f943176a 1513 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
1514 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1515 } else {
1516 trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
1517 }
1518
1519 preempt_enable();
1520}
1521
1522#ifdef CONFIG_CPU_HAS_MSA
1523/* Enable MSA for guest and restore context */
1524void kvm_own_msa(struct kvm_vcpu *vcpu)
1525{
1526 struct mips_coproc *cop0 = vcpu->arch.cop0;
1527 unsigned int sr, cfg5;
1528
1529 preempt_disable();
1530
1531 /*
1532 * Enable FPU if enabled in guest, since we're restoring FPU context
1533 * anyway. We set FR and FRE according to guest context.
1534 */
1535 if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
1536 sr = kvm_read_c0_guest_status(cop0);
1537
1538 /*
1539 * If FR=0 FPU state is already live, it is undefined how it
1540 * interacts with MSA state, so play it safe and save it first.
1541 */
1542 if (!(sr & ST0_FR) &&
1543 (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
1544 KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
1545 kvm_lose_fpu(vcpu);
1546
1547 change_c0_status(ST0_CU1 | ST0_FR, sr);
1548 if (sr & ST0_CU1 && cpu_has_fre) {
1549 cfg5 = kvm_read_c0_guest_config5(cop0);
1550 change_c0_config5(MIPS_CONF5_FRE, cfg5);
1551 }
1552 }
1553
1554 /* Enable MSA for guest */
1555 set_c0_config5(MIPS_CONF5_MSAEN);
1556 enable_fpu_hazard();
1557
1558 switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
1559 case KVM_MIPS_AUX_FPU:
1560 /*
1561 * Guest FPU state already loaded, only restore upper MSA state
1562 */
1563 __kvm_restore_msa_upper(&vcpu->arch);
f943176a 1564 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
04ebebf4 1565 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
1566 break;
1567 case 0:
1568 /* Neither FPU or MSA already active, restore full MSA state */
1569 __kvm_restore_msa(&vcpu->arch);
f943176a 1570 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
539cb89f 1571 if (kvm_mips_guest_has_fpu(&vcpu->arch))
f943176a 1572 vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
1573 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
1574 KVM_TRACE_AUX_FPU_MSA);
1575 break;
1576 default:
04ebebf4 1577 trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
1578 break;
1579 }
1580
1581 preempt_enable();
1582}
1583#endif
1584
1585/* Drop FPU & MSA without saving it */
1586void kvm_drop_fpu(struct kvm_vcpu *vcpu)
1587{
1588 preempt_disable();
f943176a 1589 if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
539cb89f 1590 disable_msa();
04ebebf4 1591 trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
f943176a 1592 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
539cb89f 1593 }
f943176a 1594 if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
98e91b84 1595 clear_c0_status(ST0_CU1 | ST0_FR);
04ebebf4 1596 trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
f943176a 1597 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
1598 }
1599 preempt_enable();
1600}
1601
539cb89f 1602/* Save and disable FPU & MSA */
1603void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1604{
1605 /*
1606 * FPU & MSA get disabled in root context (hardware) when it is disabled
1607 * in guest context (software), but the register state in the hardware
1608 * may still be in use. This is why we explicitly re-enable the hardware
1609 * before saving.
1610 */
1611
1612 preempt_disable();
f943176a 1613 if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
1614 set_c0_config5(MIPS_CONF5_MSAEN);
1615 enable_fpu_hazard();
1616
1617 __kvm_save_msa(&vcpu->arch);
04ebebf4 1618 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
1619
1620 /* Disable MSA & FPU */
1621 disable_msa();
f943176a 1622 if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
539cb89f 1623 clear_c0_status(ST0_CU1 | ST0_FR);
1624 disable_fpu_hazard();
1625 }
1626 vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
1627 } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
1628 set_c0_status(ST0_CU1);
1629 enable_fpu_hazard();
1630
1631 __kvm_save_fpu(&vcpu->arch);
f943176a 1632 vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
04ebebf4 1633 trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1634
1635 /* Disable FPU */
1636 clear_c0_status(ST0_CU1 | ST0_FR);
4ac33429 1637 disable_fpu_hazard();
1638 }
1639 preempt_enable();
1640}
1641
1642/*
1643 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
1644 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
1645 * exception if cause bits are set in the value being written.
1646 */
1647static int kvm_mips_csr_die_notify(struct notifier_block *self,
1648 unsigned long cmd, void *ptr)
1649{
1650 struct die_args *args = (struct die_args *)ptr;
1651 struct pt_regs *regs = args->regs;
1652 unsigned long pc;
1653
1654 /* Only interested in FPE and MSAFPE */
1655 if (cmd != DIE_FP && cmd != DIE_MSAFP)
1656 return NOTIFY_DONE;
1657
1658 /* Return immediately if guest context isn't active */
1659 if (!(current->flags & PF_VCPU))
1660 return NOTIFY_DONE;
1661
1662 /* Should never get here from user mode */
1663 BUG_ON(user_mode(regs));
1664
1665 pc = instruction_pointer(regs);
1666 switch (cmd) {
1667 case DIE_FP:
1668 /* match 2nd instruction in __kvm_restore_fcsr */
1669 if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
1670 return NOTIFY_DONE;
1671 break;
1672 case DIE_MSAFP:
1673 /* match 2nd/3rd instruction in __kvm_restore_msacsr */
1674 if (!cpu_has_msa ||
1675 pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
1676 pc > (unsigned long)&__kvm_restore_msacsr + 8)
1677 return NOTIFY_DONE;
1678 break;
1679 }
1680
1681 /* Move PC forward a little and continue executing */
1682 instruction_pointer(regs) += 4;
1683
1684 return NOTIFY_STOP;
1685}
1686
1687static struct notifier_block kvm_mips_csr_die_notifier = {
1688 .notifier_call = kvm_mips_csr_die_notify,
1689};
1690
2db9d233 1691static int __init kvm_mips_init(void)
1692{
1693 int ret;
1694
1695 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1696
1697 if (ret)
1698 return ret;
1699
1700 register_die_notifier(&kvm_mips_csr_die_notifier);
1701
1702 return 0;
1703}
1704
2db9d233 1705static void __exit kvm_mips_exit(void)
1706{
1707 kvm_exit();
1708
98e91b84 1709 unregister_die_notifier(&kvm_mips_csr_die_notifier);
1710}
1711
1712module_init(kvm_mips_init);
1713module_exit(kvm_mips_exit);
1714
1715EXPORT_TRACEPOINT_SYMBOL(kvm_exit);