MIPS: KVM: Remove unneeded volatile
arch/mips/kvm/kvm_trap_emul.c [deliverable/linux.git]

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <linux/kvm_host.h>

#include "kvm_mips_opcode.h"
#include "kvm_mips_int.h"

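/*
 * Guest virtual to guest physical translation for trap & emulate: unmapped
 * KSEG0/KSEG1 guest addresses translate directly by stripping the segment
 * bits; any other segment is unexpected here and is reported as an error.
 */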
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;
	uint32_t kseg = KSEGX(gva);

	if ((kseg == CKSEG0) || (kseg == CKSEG1))
		gpa = CPHYSADDR(gva);
	else {
		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
		kvm_mips_dump_host_tlbs();
		gpa = KVM_INVALID_ADDR;
	}

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}

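/*
 * Coprocessor unusable exception: a coprocessor number of 1 in CP0 Cause.CE
 * means the guest touched the FPU, which is delivered back to the guest as
 * an FPU exception; everything else is handed to the instruction emulator.
 */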
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
		er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
	else
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	case EMULATE_WAIT:
		run->exit_reason = KVM_EXIT_INTR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

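/*
 * TLB modified exception: faults on user/KSEG23 guest addresses go through
 * the TLB-mod emulation path; KSEG0 and anything else is fatal here and
 * dumps state before exiting to userspace with an internal error.
 */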
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);
		er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);

		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * XXXKYMA: The guest kernel does not expect to get this fault
		 * when we are not using HIGHMEM. Need to address this in a
		 * HIGHMEM kernel
		 */
		kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	} else {
		kvm_err("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

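/*
 * The two TLB miss handlers below share the same structure: commpage
 * accesses from guest kernel mode get their special mapping, user/KSEG23
 * misses are either passed on to the guest or refilled from the guest TLB,
 * and KSEG0 misses are filled directly by KVM.
 */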
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB ST fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);
		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault
		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB LD fault: PC: %#lx, BadVaddr: %#lx\n",
			  vcpu->arch.pc, badvaddr);

		/*
		 * User Address (UA) fault, this could happen if
		 * (1) TLB entry not present/valid in both Guest and shadow host
		 *     TLBs, in this case we pass on the fault to the guest
		 *     kernel and let it handle it.
		 * (2) TLB entry is present in the Guest TLB but not in the
		 *     shadow, in this case we inject the TLB from the Guest TLB
		 *     into the shadow host TLB
		 */

		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault
		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

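/*
 * Address error exceptions: accesses to CKSEG0/CKSEG1 (from guest kernel
 * mode in the store case) are treated as MMIO and run through the
 * instruction emulator, exiting to userspace with KVM_EXIT_MMIO; anything
 * else is reported as an internal error.
 */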
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KERNEL_MODE(vcpu)
	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		kvm_debug("Emulate Store to MMIO space\n");
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Store to MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Load from MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		er = EMULATE_FAIL;
	}
	return ret;
}

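/*
 * Syscall, reserved instruction and breakpoint exceptions are simply
 * re-delivered to (or emulated on behalf of) the guest; a failed emulation
 * becomes an internal error exit.
 */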
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
	return 0;
}

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t config1;
	int vcpu_id = vcpu->vcpu_id;

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected, for now we simulate a MIPS 24kc
	 */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
	kvm_write_c0_guest_config(cop0,
				  MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
				  (MMU_TYPE_R4000 << CP0C0_MT));

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &=
	    ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
	      (1 << CP0C1_WR) | (1 << CP0C1_CA));
	kvm_write_c0_guest_config1(cop0, config1);

	kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
	/* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
	kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
				   (1 << CP0C3_ULRI));

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/*
	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));

	return 0;
}

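/*
 * Register access callbacks: only the timer-related registers (CP0 Count,
 * Compare, the timer bits in Cause, and the KVM count controls) get special
 * handling below; anything else returns -EINVAL.
 */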
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,

	.vm_init = kvm_trap_emul_vm_init,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
};

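/*
 * Install the trap & emulate callback table; the rest of MIPS KVM is
 * expected to operate through these callbacks.
 */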
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}