/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "opcode.h"
#include "interrupt.h"
#include "commpage.h"

#include "trace.h"

/*
 * Compute the return address and do emulate branch simulation, if required.
 * This function should only be called when the vcpu is in a branch delay slot.
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
				     unsigned long instpc)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc = KVM_INVALID_INST;

	if (epc & 3)
		goto unaligned;

	/* Read the instruction */
	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);

	if (insn.word == KVM_INVALID_INST)
		return KVM_INVALID_INST;

	switch (insn.i_format.opcode) {
		/* jr and jalr are in r_format format. */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		case bposge32_op:
			if (!cpu_has_dsp)
				goto sigill;

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		}
		break;

		/* These are unconditional and in j_format. */
	case jal_op:
		arch->gprs[31] = instpc + 8;
	case j_op:
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

		/* These are conditional and in i_format. */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:	/* not really i_format */
	case blezl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:
	case bgtzl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

		/* And now the FPA/cp1 branch instructions. */
	case cop1_op:
		kvm_err("%s: unsupported cop1_op\n", __func__);
		break;
	}

	return nextpc;

unaligned:
	kvm_err("%s: unaligned epc\n", __func__);
	return nextpc;

sigill:
	kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
	return nextpc;
}
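
/*
 * Worked example (illustrative, not part of the original source): for a
 * taken beq at EPC 0x80001000 with a signed immediate of 0x10, the return
 * EPC is 0x80001000 + 4 + (0x10 << 2) = 0x80001044; if the branch is not
 * taken, execution resumes at EPC + 8, just past the delay slot.
 */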

enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
{
	unsigned long branch_pc;
	enum emulation_result er = EMULATE_DONE;

	if (cause & CAUSEF_BD) {
		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
		if (branch_pc == KVM_INVALID_INST) {
			er = EMULATE_FAIL;
		} else {
			vcpu->arch.pc = branch_pc;
			kvm_debug("BD update_pc(): New PC: %#lx\n",
				  vcpu->arch.pc);
		}
	} else
		vcpu->arch.pc += 4;

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return er;
}

/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case CP0_Count timer is running).
 */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}

/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
	s64 now_ns, periods;
	u64 delta;

	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.count_dyn_bias;

	if (delta >= vcpu->arch.count_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.count_period);
		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.count_dyn_bias;
	}

	/*
	 * We've ensured that:
	 *   delta < count_period
	 *
	 * Therefore the intermediate delta*count_hz will never overflow since
	 * at the boundary condition:
	 *   delta = count_period
	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
	 *   delta * count_hz = NSEC_PER_SEC * 2^32
	 */
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}
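
/*
 * Illustrative numbers (not part of the original source): at the default
 * count_hz of 100 MHz, count_period = 10^9 * 2^32 / 10^8 ns, roughly 42.95
 * seconds, so the intermediate delta * count_hz peaks at 10^9 * 2^32, about
 * 2^62, which still fits in the 64-bit value passed to div_u64() above.
 */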

/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:	Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward
 * ktime_get(), except when the master disable bit is set in count_ctl, in
 * which case it is count_resume, i.e. the time that the count was disabled.
 *
 * Returns:	Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		return vcpu->arch.count_resume;

	return ktime_get();
}

/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now and handles if
 * the timer interrupt is pending and hasn't been handled yet.
 *
 * Returns:	The current value of the guest CP0_Count register.
 */
static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
	ktime_t expires;
	int running;

	/* Is the hrtimer pending? */
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
	if (ktime_compare(now, expires) >= 0) {
		/*
		 * Cancel it while we handle it so there's no chance of
		 * interference with the timeout handler.
		 */
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

		/* Nothing should be waiting on the timeout */
		kvm_mips_callbacks->queue_timer_int(vcpu);

		/*
		 * Restart the timer if it was running based on the expiry time
		 * we read, so that we don't push it back 2 periods.
		 */
		if (running) {
			expires = ktime_add_ns(expires,
					       vcpu->arch.count_period);
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
				      HRTIMER_MODE_ABS);
		}
	}

	/* Return the biased and scaled guest CP0_Count */
	return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
}

/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the
 * timer is stopped.
 *
 * Returns:	The current guest CP0_Count value.
 */
uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* If count disabled just read static copy of count */
	if (kvm_mips_count_disabled(vcpu))
		return kvm_read_c0_guest_count(cop0);

	return kvm_mips_read_count_running(vcpu, ktime_get());
}

/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The ktime at the point of freeze.
 */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
				       uint32_t *count)
{
	ktime_t now;

	/* stop hrtimer before finding time */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	now = ktime_get();

	/* find count at this point and handle pending hrtimer */
	*count = kvm_mips_read_count_running(vcpu, now);

	return now;
}

/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at point of resume.
 * @count:	CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, uint32_t count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t compare;
	u64 delta;
	ktime_t expire;

	/* Calculate timeout (wrap 0 to 2^32) */
	compare = kvm_read_c0_guest_compare(cop0);
	delta = (u64)(uint32_t)(compare - count - 1) + 1;
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}
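
/*
 * Worked example of the wrap calculation above (illustrative): if
 * compare == count, then (u64)(uint32_t)(compare - count - 1) + 1 evaluates
 * to 0xffffffff + 1 = 2^32, i.e. a full Count period rather than zero, so
 * the next timer interrupt fires one whole wrap later, as real hardware
 * would behave.
 */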

/**
 * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
 * @vcpu:	Virtual CPU.
 *
 * Recalculates and updates the expiry time of the hrtimer. This can be used
 * after timer parameters have been altered which do not depend on the time
 * that the change occurs (in those cases kvm_mips_freeze_hrtimer() and
 * kvm_mips_resume_hrtimer() are used directly).
 *
 * It is guaranteed that no timer interrupts will be lost in the process.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
{
	ktime_t now;
	uint32_t count;

	/*
	 * freeze_hrtimer takes care of timer interrupts <= count, and
	 * resume_hrtimer takes care of timer interrupts > count.
	 */
	now = kvm_mips_freeze_hrtimer(vcpu, &count);
	kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t now;

	/* Calculate bias */
	now = kvm_mips_count_time(vcpu);
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	if (kvm_mips_count_disabled(vcpu))
		/* The timer's disabled, adjust the static count */
		kvm_write_c0_guest_count(cop0, count);
	else
		/* Update timeout */
		kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and
 * set it going if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
{
	/* 100 MHz */
	vcpu->arch.count_hz = 100*1000*1000;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
					  vcpu->arch.count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Starting at 0 */
	kvm_mips_write_count(vcpu, 0);
}
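
/*
 * Illustrative note (not part of the original source): count_period is the
 * length of one full 32-bit Count wrap in nanoseconds at count_hz; at the
 * default 100 MHz that is (10^9 << 32) / 10^8 = 42949672960 ns, which is
 * also the interval by which an expired hrtimer is pushed forward in
 * kvm_mips_count_timeout() below.
 */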

/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so
 * that CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:	-EINVAL if @count_hz is out of range.
 *		0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	ktime_t now;
	u32 count;

	/* ensure the frequency is in a sensible range... */
	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
		return -EINVAL;
	/* ... and has actually changed */
	if (vcpu->arch.count_hz == count_hz)
		return 0;

	/* Safely freeze timer so we can keep it continuous */
	dc = kvm_mips_count_disabled(vcpu);
	if (dc) {
		now = kvm_mips_count_time(vcpu);
		count = kvm_read_c0_guest_count(cop0);
	} else {
		now = kvm_mips_freeze_hrtimer(vcpu, &count);
	}

	/* Update the frequency */
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Calculate adjusted bias so dynamic count is unchanged */
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	/* Update and resume hrtimer */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
	return 0;
}

/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 *
 * Update CP0_Compare to a new value and update the timeout.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* if unchanged, must just be an ack */
	if (kvm_read_c0_guest_compare(cop0) == compare)
		return;

	/* Update compare */
	kvm_write_c0_guest_compare(cop0, compare);

	/* Update timeout if count enabled */
	if (!kvm_mips_count_disabled(vcpu))
		kvm_mips_update_hrtimer(vcpu);
}

/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:	The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t count;
	ktime_t now;

	/* Stop hrtimer */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	/* Set the static count from the dynamic count, handling pending TI */
	now = ktime_get();
	count = kvm_mips_read_count_running(vcpu, now);
	kvm_write_c0_guest_count(cop0, count);

	return now;
}

/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		kvm_mips_count_disable(vcpu);
}
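
/*
 * Illustrative note: the count is stopped whenever either Guest.CP0_Cause.DC
 * or count_ctl.DC is set, so kvm_mips_count_disable() is only invoked above
 * on the transition where both bits were previously clear; if count_ctl.DC
 * is already set the count was frozen earlier and only the Cause bit needs
 * recording.
 */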

/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t count;

	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

	/*
	 * Set the dynamic count to match the static count.
	 * This starts the hrtimer if count_ctl.DC allows it.
	 * Otherwise it conveniently updates the biases.
	 */
	count = kvm_read_c0_guest_count(cop0);
	kvm_mips_write_count(vcpu, count);
}

/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:	Virtual CPU.
 * @count_ctl:	Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:	-EINVAL if reserved bits are set.
 *		0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
	s64 delta;
	ktime_t expire, now;
	uint32_t count, compare;

	/* Only allow defined bits to be changed */
	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
		return -EINVAL;

	/* Apply new value */
	vcpu->arch.count_ctl = count_ctl;

	/* Master CP0_Count disable */
	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
		/* Is CP0_Cause.DC already disabling CP0_Count? */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
				/* Just record the current time */
				vcpu->arch.count_resume = ktime_get();
		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
			/* disable timer and record current time */
			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
		} else {
			/*
			 * Calculate timeout relative to static count at resume
			 * time (wrap 0 to 2^32).
			 */
			count = kvm_read_c0_guest_count(cop0);
			compare = kvm_read_c0_guest_compare(cop0);
			delta = (u64)(uint32_t)(compare - count - 1) + 1;
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
			expire = ktime_add_ns(vcpu->arch.count_resume, delta);

			/* Handle pending interrupt */
			now = ktime_get();
			if (ktime_compare(now, expire) >= 0)
				/* Nothing should be waiting on the timeout */
				kvm_mips_callbacks->queue_timer_int(vcpu);

			/* Resume hrtimer without changing bias */
			count = kvm_mips_read_count_running(vcpu, now);
			kvm_mips_resume_hrtimer(vcpu, now, count);
		}
	}

	return 0;
}
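
/*
 * Illustrative walk-through of the master enable path above: while
 * count_ctl.DC was set, effective time stood still at count_resume, so when
 * the bit is cleared the next timeout is computed relative to count_resume
 * from the static Count and Compare values, and any interval that already
 * elapsed in real time is delivered immediately via queue_timer_int() before
 * the hrtimer is resumed.
 */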

/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:		Virtual CPU.
 * @count_resume:	Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:	-EINVAL if out of valid range (0..now).
 *		0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
	/*
	 * It doesn't make sense for the resume time to be in the future, as it
	 * would be possible for the next interrupt to be more than a full
	 * period in the future.
	 */
	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
		return -EINVAL;

	vcpu->arch.count_resume = ns_to_ktime(count_resume);
	return 0;
}

/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 *
 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
	/* Add the Count period to the current expiry time */
	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
			       vcpu->arch.count_period);
	return HRTIMER_RESTART;
}

enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else {
		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
			vcpu->arch.pc);
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, WAIT_EXITS);
	if (!vcpu->arch.pending_exceptions) {
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable, then definitely go off to user space to
		 * check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return EMULATE_DONE;
}

/*
 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so
 * that we can catch this, if things ever change
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t pc = vcpu->arch.pc;

	kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
	return EMULATE_FAIL;
}

/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		kvm_debug("%s: illegal index: %d\n", __func__, index);
		kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
			  pc, index, kvm_read_c0_guest_entryhi(cop0),
			  kvm_read_c0_guest_entrylo0(cop0),
			  kvm_read_c0_guest_entrylo1(cop0),
			  kvm_read_c0_guest_pagemask(cop0));
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
	}

	tlb = &vcpu->arch.guest_tlb[index];
	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0),
		  kvm_read_c0_guest_pagemask(cop0));

	return EMULATE_DONE;
}

/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;
	int index;

	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);

	tlb = &vcpu->arch.guest_tlb[index];

	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0));

	return EMULATE_DONE;
}

enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	long entryhi = kvm_read_c0_guest_entryhi(cop0);
	uint32_t pc = vcpu->arch.pc;
	int index = -1;

	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

	kvm_write_c0_guest_index(cop0, index);

	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
		  index);

	return EMULATE_DONE;
}

/**
 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config1 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

/**
 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config3 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config4 is optional */
	unsigned int mask = MIPS_CONF_M;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}

/**
 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config4 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config5 is optional */
	return MIPS_CONF_M;
}

/**
 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config5 CP0
 * register, by the guest itself.
 */
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE;
		/* We don't support UFR or UFE */
	}

	return mask;
}
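
/*
 * Illustrative note: these masks are applied with the pattern
 *   change = (val ^ old_val) & wrmask;  val = old_val ^ change;
 * so a write can only flip bits the mask permits, as in the Config5
 * handling inside kvm_mips_emulate_CP0() below.
 */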

enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
					   uint32_t cause, struct kvm_run *run,
					   struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	int32_t rt, rd, copz, sel, co_bit, op;
	uint32_t pc = vcpu->arch.pc;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	copz = (inst >> 21) & 0x1f;
	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;
	co_bit = (inst >> 25) & 1;

	if (co_bit) {
		op = (inst) & 0xff;

		switch (op) {
		case tlbr_op:	/* Read indexed TLB entry */
			er = kvm_mips_emul_tlbr(vcpu);
			break;
		case tlbwi_op:	/* Write indexed */
			er = kvm_mips_emul_tlbwi(vcpu);
			break;
		case tlbwr_op:	/* Write random */
			er = kvm_mips_emul_tlbwr(vcpu);
			break;
		case tlbp_op:	/* TLB Probe */
			er = kvm_mips_emul_tlbp(vcpu);
			break;
		case rfe_op:
			kvm_err("!!!COP0_RFE!!!\n");
			break;
		case eret_op:
			er = kvm_mips_emul_eret(vcpu);
			goto dont_update_pc;
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		}
	} else {
		switch (copz) {
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			/* Get reg */
			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
				vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			} else {
				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			}

			kvm_debug("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
				  pc, rd, sel, rt, vcpu->arch.gprs[rt]);
			break;

		case dmfc_op:
			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
			break;

		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if ((rd == MIPS_CP0_TLB_INDEX)
			    && (vcpu->arch.gprs[rt] >=
				KVM_MIPS_GUEST_TLB_SIZE)) {
				kvm_err("Invalid TLB Index: %ld",
					vcpu->arch.gprs[rt]);
				er = EMULATE_FAIL;
				break;
			}
#define C0_EBASE_CORE_MASK 0xff
			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
				/* Preserve CORE number */
				kvm_change_c0_guest_ebase(cop0,
							  ~(C0_EBASE_CORE_MASK),
							  vcpu->arch.gprs[rt]);
				kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
					kvm_read_c0_guest_ebase(cop0));
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				uint32_t nasid =
					vcpu->arch.gprs[rt] & ASID_MASK;
				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
				    ((kvm_read_c0_guest_entryhi(cop0) &
				      ASID_MASK) != nasid)) {
					kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
						  kvm_read_c0_guest_entryhi(cop0)
						  & ASID_MASK,
						  vcpu->arch.gprs[rt]
						  & ASID_MASK);

					/* Blow away the shadow host TLBs */
					kvm_mips_flush_host_tlb(1);
				}
				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
			}
			/* Are we writing to COUNT */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
				goto done;
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
					  pc, kvm_read_c0_guest_compare(cop0),
					  vcpu->arch.gprs[rt]);

				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_callbacks->dequeue_timer_int(vcpu);
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt]);
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				unsigned int old_val, val, change;

				old_val = kvm_read_c0_guest_status(cop0);
				val = vcpu->arch.gprs[rt];
				change = val ^ old_val;

				/* Make sure that the NMI bit is never set */
				val &= ~ST0_NMI;

				/*
				 * Don't allow CU1 or FR to be set unless FPU
				 * capability enabled and exists in guest
				 * configuration.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					val &= ~(ST0_CU1 | ST0_FR);

				/*
				 * Also don't allow FR to be set if host doesn't
				 * support it.
				 */
				if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
					val &= ~ST0_FR;

				/* Handle changes in FPU mode */
				preempt_disable();

				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				if (change & ST0_FR)
					kvm_drop_fpu(vcpu);

				/*
				 * If MSA state is already live, it is undefined
				 * how it interacts with FR=0 FPU state, and we
				 * don't want to hit reserved instruction
				 * exceptions trying to save the MSA state later
				 * when CU=1 && FR=1, so play it safe and save
				 * it first.
				 */
				if (change & ST0_CU1 && !(val & ST0_FR) &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
					kvm_lose_fpu(vcpu);

				/*
				 * Propagate CU1 (FPU enable) changes
				 * immediately if the FPU context is already
				 * loaded. When disabling we leave the context
				 * loaded so it can be quickly enabled again in
				 * the near future.
				 */
				if (change & ST0_CU1 &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
					change_c0_status(ST0_CU1, val);

				preempt_enable();

				kvm_write_c0_guest_status(cop0, val);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				/*
				 * If FPU present, we need CU1/FR bits to take
				 * effect fairly soon.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
				unsigned int old_val, val, change, wrmask;

				old_val = kvm_read_c0_guest_config5(cop0);
				val = vcpu->arch.gprs[rt];

				/* Only a few bits are writable in Config5 */
				wrmask = kvm_mips_config5_wrmask(vcpu);
				change = (val ^ old_val) & wrmask;
				val = old_val ^ change;

				/* Handle changes in FPU/MSA modes */
				preempt_disable();

				/*
				 * Propagate FRE changes immediately if the FPU
				 * context is already loaded.
				 */
				if (change & MIPS_CONF5_FRE &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
					change_c0_config5(MIPS_CONF5_FRE, val);

				/*
				 * Propagate MSAEn changes immediately if the
				 * MSA context is already loaded. When disabling
				 * we leave the context loaded so it can be
				 * quickly enabled again in the near future.
				 */
				if (change & MIPS_CONF5_MSAEN &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
					change_c0_config5(MIPS_CONF5_MSAEN,
							  val);

				preempt_enable();

				kvm_write_c0_guest_config5(cop0, val);
			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
				uint32_t old_cause, new_cause;

				old_cause = kvm_read_c0_guest_cause(cop0);
				new_cause = vcpu->arch.gprs[rt];
				/* Update R/W bits */
				kvm_change_c0_guest_cause(cop0, 0x08800300,
							  new_cause);
				/* DC bit enabling/disabling timer? */
				if ((old_cause ^ new_cause) & CAUSEF_DC) {
					if (new_cause & CAUSEF_DC)
						kvm_mips_count_disable_cause(vcpu);
					else
						kvm_mips_count_enable_cause(vcpu);
				}
			} else {
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			}

			kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
				  rd, sel, cop0->reg[rd][sel]);
			break;

		case dmtc_op:
			kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
				vcpu->arch.pc, rt, rd, sel);
			er = EMULATE_FAIL;
			break;

		case mfmcz_op:
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
			if (rt != 0) {
				vcpu->arch.gprs[rt] =
					kvm_read_c0_guest_status(cop0);
			}
			/* EI */
			if (inst & 0x20) {
				kvm_debug("[%#lx] mfmcz_op: EI\n",
					  vcpu->arch.pc);
				kvm_set_c0_guest_status(cop0, ST0_IE);
			} else {
				kvm_debug("[%#lx] mfmcz_op: DI\n",
					  vcpu->arch.pc);
				kvm_clear_c0_guest_status(cop0, ST0_IE);
			}

			break;

		case wrpgpr_op:
			{
				uint32_t css =
					cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
				uint32_t pss =
					(cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
				/*
				 * We don't support any shadow register sets, so
				 * SRSCtl[PSS] == SRSCtl[CSS] = 0
				 */
				if (css || pss) {
					er = EMULATE_FAIL;
					break;
				}
				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
					  vcpu->arch.gprs[rt]);
				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
			}
			break;
		default:
			kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
				vcpu->arch.pc, copz);
			er = EMULATE_FAIL;
			break;
		}
	}

done:
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

dont_update_pc:
	/*
	 * This is for special instructions whose emulation
	 * updates the PC, so do not overwrite the PC under
	 * any circumstances
	 */

	return er;
}

enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;
	void *data = run->mmio.data;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

	switch (op) {
	case sb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(u8 *) data = vcpu->arch.gprs[rt];
		kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
			  *(uint8_t *) data);

		break;

	case sw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint32_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint32_t *) data);
		break;

	case sh_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		*(uint16_t *) data = vcpu->arch.gprs[rt];

		kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
			  vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
			  vcpu->arch.gprs[rt], *(uint32_t *) data);
		break;

	default:
		kvm_err("Store not yet supported");
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}
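
/*
 * Illustrative note on the store path above: the faulting GVA held in
 * host_cp0_badvaddr is translated to a GPA, the register value is copied
 * into run->mmio.data, and EMULATE_DO_MMIO hands the access to userland to
 * complete.
 */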

enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
					    struct kvm_run *run,
					    struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

	vcpu->arch.pending_load_cause = cause;
	vcpu->arch.io_gpr = rt;

	switch (op) {
	case lw_op:
		bytes = 4;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		break;

	case lh_op:
	case lhu_op:
		bytes = 2;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;

		if (op == lh_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	case lbu_op:
	case lb_op:
		bytes = 1;
		if (bytes > sizeof(run->mmio.data)) {
			kvm_err("%s: bad MMIO length: %d\n", __func__,
				run->mmio.len);
			er = EMULATE_FAIL;
			break;
		}
		run->mmio.phys_addr =
		    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
						   host_cp0_badvaddr);
		if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
			er = EMULATE_FAIL;
			break;
		}

		run->mmio.len = bytes;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;

		if (op == lb_op)
			vcpu->mmio_needed = 2;
		else
			vcpu->mmio_needed = 1;

		break;

	default:
		kvm_err("Load not yet supported");
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
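
/*
 * Illustrative note: mmio_needed appears to double as a sign flag here; 2
 * marks a signed load (lb/lh) so the MMIO completion path (not shown in this
 * excerpt) can sign-extend the returned data, while 1 marks an unsigned or
 * full-width load (lbu/lhu/lw).
 */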

int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
{
	unsigned long offset = (va & ~PAGE_MASK);
	struct kvm *kvm = vcpu->kvm;
	unsigned long pa;
	gfn_t gfn;
	kvm_pfn_t pfn;

	gfn = va >> PAGE_SHIFT;

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		return -1;
	}
	pfn = kvm->arch.guest_pmap[gfn];
	pa = (pfn << PAGE_SHIFT) | offset;

	kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
		  CKSEG0ADDR(pa));

	local_flush_icache_range(CKSEG0ADDR(pa), 32);
	return 0;
}
1547 | ||
1548 | #define MIPS_CACHE_OP_INDEX_INV 0x0 | |
1549 | #define MIPS_CACHE_OP_INDEX_LD_TAG 0x1 | |
1550 | #define MIPS_CACHE_OP_INDEX_ST_TAG 0x2 | |
1551 | #define MIPS_CACHE_OP_IMP 0x3 | |
1552 | #define MIPS_CACHE_OP_HIT_INV 0x4 | |
1553 | #define MIPS_CACHE_OP_FILL_WB_INV 0x5 | |
1554 | #define MIPS_CACHE_OP_HIT_HB 0x6 | |
1555 | #define MIPS_CACHE_OP_FETCH_LOCK 0x7 | |
1556 | ||
1557 | #define MIPS_CACHE_ICACHE 0x0 | |
1558 | #define MIPS_CACHE_DCACHE 0x1 | |
1559 | #define MIPS_CACHE_SEC 0x3 | |
1560 | ||
d116e812 DCZ |
1561 | enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, |
1562 | uint32_t cause, | |
1563 | struct kvm_run *run, | |
1564 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
1565 | { |
1566 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
e685c689 SL |
1567 | enum emulation_result er = EMULATE_DONE; |
1568 | int32_t offset, cache, op_inst, op, base; | |
1569 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
1570 | unsigned long va; | |
1571 | unsigned long curr_pc; | |
1572 | ||
1573 | /* | |
1574 | * Update PC and hold onto current PC in case there is | |
1575 | * an error and we want to roll back the PC | |
1576 | */ | |
1577 | curr_pc = vcpu->arch.pc; | |
1578 | er = update_pc(vcpu, cause); | |
1579 | if (er == EMULATE_FAIL) | |
1580 | return er; | |
1581 | ||
1582 | base = (inst >> 21) & 0x1f; | |
1583 | op_inst = (inst >> 16) & 0x1f; | |
c5c2a3b9 | 1584 | offset = (int16_t)inst; |
e685c689 SL |
1585 | cache = (inst >> 16) & 0x3; |
1586 | op = (inst >> 18) & 0x7; | |
1587 | ||
1588 | va = arch->gprs[base] + offset; | |
1589 | ||
1590 | kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n", | |
1591 | cache, op, base, arch->gprs[base], offset); | |
1592 | ||
d116e812 DCZ |
1593 | /* |
1594 | * Treat INDEX_INV as a no-op: Linux issues it on startup to | |
1595 | * invalidate the caches entirely by stepping through all the | |
1596 | * ways/indexes | |
e685c689 SL |
1597 | */ |
1598 | if (op == MIPS_CACHE_OP_INDEX_INV) { | |
d116e812 DCZ |
1599 | kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n", | |
1600 | vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, | |
1601 | arch->gprs[base], offset); | |
e685c689 SL |
1602 | |
1603 | if (cache == MIPS_CACHE_DCACHE) | |
1604 | r4k_blast_dcache(); | |
1605 | else if (cache == MIPS_CACHE_ICACHE) | |
1606 | r4k_blast_icache(); | |
1607 | else { | |
6ad78a5c DCZ |
1608 | kvm_err("%s: unsupported CACHE INDEX operation\n", |
1609 | __func__); | |
e685c689 SL |
1610 | return EMULATE_FAIL; |
1611 | } | |
1612 | ||
1613 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | |
1614 | kvm_mips_trans_cache_index(inst, opc, vcpu); | |
1615 | #endif | |
1616 | goto done; | |
1617 | } | |
1618 | ||
1619 | preempt_disable(); | |
1620 | if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { | |
d116e812 | 1621 | if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) |
e685c689 | 1622 | kvm_mips_handle_kseg0_tlb_fault(va, vcpu); |
e685c689 SL |
1623 | } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || |
1624 | KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { | |
1625 | int index; | |
1626 | ||
1627 | /* If an entry already exists then skip */ | |
d116e812 | 1628 | if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) |
e685c689 | 1629 | goto skip_fault; |
e685c689 | 1630 | |
d116e812 DCZ |
1631 | /* |
1632 | * If the address is not in the guest TLB, give the guest a fault; | |
1633 | * the resulting handler will do the right thing | |
e685c689 SL |
1634 | */ |
1635 | index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | | |
48c4ac97 DD |
1636 | (kvm_read_c0_guest_entryhi |
1637 | (cop0) & ASID_MASK)); | |
e685c689 SL |
1638 | |
1639 | if (index < 0) { | |
1640 | vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); | |
1641 | vcpu->arch.host_cp0_badvaddr = va; | |
1642 | er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, | |
1643 | vcpu); | |
1644 | preempt_enable(); | |
1645 | goto dont_update_pc; | |
1646 | } else { | |
1647 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; | |
d116e812 DCZ |
1648 | /* |
1649 | * Check if the entry is valid, if not then setup a TLB | |
1650 | * invalid exception to the guest | |
1651 | */ | |
e685c689 SL |
1652 | if (!TLB_IS_VALID(*tlb, va)) { |
1653 | er = kvm_mips_emulate_tlbinv_ld(cause, NULL, | |
1654 | run, vcpu); | |
1655 | preempt_enable(); | |
1656 | goto dont_update_pc; | |
1657 | } else { | |
d116e812 DCZ |
1658 | /* |
1659 | * We fault an entry from the guest TLB into the | |
1660 | * shadow host TLB | |
1661 | */ | |
e685c689 SL |
1662 | kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, |
1663 | NULL, | |
1664 | NULL); | |
1665 | } | |
1666 | } | |
1667 | } else { | |
6ad78a5c DCZ |
1668 | kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n", | |
1669 | cache, op, base, arch->gprs[base], offset); | |
e685c689 SL |
1670 | er = EMULATE_FAIL; |
1671 | preempt_enable(); | |
1672 | goto dont_update_pc; | |
1673 | ||
1674 | } | |
1675 | ||
1676 | skip_fault: | |
1677 | /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */ | |
1678 | if (cache == MIPS_CACHE_DCACHE | |
1679 | && (op == MIPS_CACHE_OP_FILL_WB_INV | |
1680 | || op == MIPS_CACHE_OP_HIT_INV)) { | |
1681 | flush_dcache_line(va); | |
1682 | ||
1683 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | |
d116e812 DCZ |
1684 | /* |
1685 | * Replace the CACHE instruction with a SYNCI; not the same, | |
1686 | * but it avoids a trap | |
1687 | */ | |
e685c689 SL |
1688 | kvm_mips_trans_cache_va(inst, opc, vcpu); |
1689 | #endif | |
1690 | } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) { | |
1691 | flush_dcache_line(va); | |
1692 | flush_icache_line(va); | |
1693 | ||
1694 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | |
1695 | /* Replace the CACHE instruction with a SYNCI */ | |
1696 | kvm_mips_trans_cache_va(inst, opc, vcpu); | |
1697 | #endif | |
1698 | } else { | |
6ad78a5c DCZ |
1699 | kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n", | |
1700 | cache, op, base, arch->gprs[base], offset); | |
e685c689 SL |
1701 | er = EMULATE_FAIL; |
1702 | preempt_enable(); | |
1703 | goto dont_update_pc; | |
1704 | } | |
1705 | ||
1706 | preempt_enable(); | |
1707 | ||
d116e812 DCZ |
1708 | dont_update_pc: |
1709 | /* Rollback PC */ | |
e685c689 | 1710 | vcpu->arch.pc = curr_pc; |
d116e812 | 1711 | done: |
e685c689 SL |
1712 | return er; |
1713 | } | |
1714 | ||
d116e812 DCZ |
1715 | enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, |
1716 | struct kvm_run *run, | |
1717 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
1718 | { |
1719 | enum emulation_result er = EMULATE_DONE; | |
1720 | uint32_t inst; | |
1721 | ||
d116e812 DCZ |
1722 | /* Fetch the instruction. */ |
1723 | if (cause & CAUSEF_BD) | |
e685c689 | 1724 | opc += 1; |
e685c689 SL |
1725 | |
1726 | inst = kvm_get_inst(opc, vcpu); | |
1727 | ||
1728 | switch (((union mips_instruction)inst).r_format.opcode) { | |
1729 | case cop0_op: | |
1730 | er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu); | |
1731 | break; | |
1732 | case sb_op: | |
1733 | case sh_op: | |
1734 | case sw_op: | |
1735 | er = kvm_mips_emulate_store(inst, cause, run, vcpu); | |
1736 | break; | |
1737 | case lb_op: | |
1738 | case lbu_op: | |
1739 | case lhu_op: | |
1740 | case lh_op: | |
1741 | case lw_op: | |
1742 | er = kvm_mips_emulate_load(inst, cause, run, vcpu); | |
1743 | break; | |
1744 | ||
1745 | case cache_op: | |
1746 | ++vcpu->stat.cache_exits; | |
1747 | trace_kvm_exit(vcpu, CACHE_EXITS); | |
1748 | er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu); | |
1749 | break; | |
1750 | ||
1751 | default: | |
6ad78a5c DCZ |
1752 | kvm_err("Instruction emulation not supported (%p/%#x)\n", opc, |
1753 | inst); | |
e685c689 SL |
1754 | kvm_arch_vcpu_dump_regs(vcpu); |
1755 | er = EMULATE_FAIL; | |
1756 | break; | |
1757 | } | |
1758 | ||
1759 | return er; | |
1760 | } | |
1761 | ||
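/*
 * The dispatch in kvm_mips_emulate_inst() above keys on the primary opcode,
 * which for every MIPS32 instruction sits in bits 31:26 of the word (the
 * field insn.r_format.opcode denotes). A stand-alone sketch of that
 * extraction:
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int primary_opcode(uint32_t inst)
{
	return (inst >> 26) & 0x3f;	/* bits 31:26 */
}

int main(void)
{
	/* "lw t0, 8(a0)" encodes as 0x8c880008; its opcode field is 0x23 */
	printf("opcode=%#x\n", primary_opcode(0x8c880008));
	return 0;
}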
d116e812 DCZ |
1762 | enum emulation_result kvm_mips_emulate_syscall(unsigned long cause, |
1763 | uint32_t *opc, | |
1764 | struct kvm_run *run, | |
1765 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
1766 | { |
1767 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1768 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
1769 | enum emulation_result er = EMULATE_DONE; | |
1770 | ||
1771 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
1772 | /* save old pc */ | |
1773 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
1774 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
1775 | ||
1776 | if (cause & CAUSEF_BD) | |
1777 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
1778 | else | |
1779 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
1780 | ||
1781 | kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc); | |
1782 | ||
1783 | kvm_change_c0_guest_cause(cop0, (0xff), | |
1784 | (T_SYSCALL << CAUSEB_EXCCODE)); | |
1785 | ||
1786 | /* Set PC to the exception entry point */ | |
1787 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1788 | ||
1789 | } else { | |
6ad78a5c | 1790 | kvm_err("Trying to deliver SYSCALL when EXL is already set\n"); |
e685c689 SL |
1791 | er = EMULATE_FAIL; |
1792 | } | |
1793 | ||
1794 | return er; | |
1795 | } | |
1796 | ||
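/*
 * Every injector in this file repeats the same delivery sequence: if the
 * guest is not already at exception level, save EPC, set Status.EXL, mirror
 * the branch-delay bit into Cause.BD, write the ExcCode field, and jump to
 * the exception vector. A minimal stand-alone sketch of that shared shape,
 * with hypothetical plain fields standing in for the cop0 accessors and
 * stand-in constants (prefixed SK_) for the real ones:
 */
#include <stdio.h>

#define SK_ST0_EXL	  0x2UL		/* stand-in for ST0_EXL */
#define SK_CAUSEF_BD	  (1UL << 31)	/* stand-in for CAUSEF_BD */
#define SK_CAUSEB_EXCCODE 2		/* ExcCode occupies Cause[6:2] */
#define SK_VECTOR	  0x80000180UL	/* general exception vector */

struct sk_guest {
	unsigned long pc, epc, status, cause;
};

static int sk_deliver(struct sk_guest *g, unsigned long host_cause,
		      unsigned int exccode)
{
	if (g->status & SK_ST0_EXL)
		return -1;			/* EXL already set: refuse */

	g->epc = g->pc;				/* save old pc */
	g->status |= SK_ST0_EXL;

	if (host_cause & SK_CAUSEF_BD)		/* faulted in a delay slot? */
		g->cause |= SK_CAUSEF_BD;
	else
		g->cause &= ~SK_CAUSEF_BD;

	/* change the low byte of Cause, i.e. the ExcCode field */
	g->cause = (g->cause & ~0xffUL) | (exccode << SK_CAUSEB_EXCCODE);
	g->pc = SK_VECTOR;			/* exception entry point */
	return 0;
}

int main(void)
{
	struct sk_guest g = { .pc = 0x00400bfc };

	sk_deliver(&g, 0, 8 /* Sys, as in T_SYSCALL */);
	printf("epc=%#lx pc=%#lx cause=%#lx\n", g.epc, g.pc, g.cause);
	return 0;
}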
d116e812 DCZ |
1797 | enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause, |
1798 | uint32_t *opc, | |
1799 | struct kvm_run *run, | |
1800 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
1801 | { |
1802 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1803 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
e685c689 | 1804 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
48c4ac97 | 1805 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
e685c689 SL |
1806 | |
1807 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
1808 | /* save old pc */ | |
1809 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
1810 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
1811 | ||
1812 | if (cause & CAUSEF_BD) | |
1813 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
1814 | else | |
1815 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
1816 | ||
1817 | kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n", | |
1818 | arch->pc); | |
1819 | ||
1820 | /* set pc to the exception entry point */ | |
1821 | arch->pc = KVM_GUEST_KSEG0 + 0x0; | |
1822 | ||
1823 | } else { | |
1824 | kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", | |
1825 | arch->pc); | |
1826 | ||
1827 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1828 | } | |
1829 | ||
1830 | kvm_change_c0_guest_cause(cop0, (0xff), | |
1831 | (T_TLB_LD_MISS << CAUSEB_EXCCODE)); | |
1832 | ||
1833 | /* setup badvaddr, context and entryhi registers for the guest */ | |
1834 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | |
1835 | /* XXXKYMA: is the context register used by linux??? */ | |
1836 | kvm_write_c0_guest_entryhi(cop0, entryhi); | |
1837 | /* Blow away the shadow host TLBs */ | |
1838 | kvm_mips_flush_host_tlb(1); | |
1839 | ||
d98403a5 | 1840 | return EMULATE_DONE; |
e685c689 SL |
1841 | } |
1842 | ||
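/*
 * The pre-faulted EntryHi built above pairs the VPN2 of the bad address
 * with the ASID taken from the guest EntryHi. A stand-alone sketch of that
 * masking, assuming 4 KiB pages (VPN2 mask 0xffffe000) and an 8-bit ASID;
 * typical values, used here only for illustration:
 */
#include <stdio.h>

#define SK_VPN2_MASK 0xffffe000UL	/* even/odd page-pair number */
#define SK_ASID_MASK 0x000000ffUL

int main(void)
{
	unsigned long badvaddr = 0x00452a10;	  /* example faulting VA */
	unsigned long guest_entryhi = 0x00000027; /* example ASID 0x27 */
	unsigned long entryhi = (badvaddr & SK_VPN2_MASK) |
				(guest_entryhi & SK_ASID_MASK);

	printf("entryhi=%#lx\n", entryhi);	/* prints entryhi=0x452027 */
	return 0;
}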
d116e812 DCZ |
1843 | enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause, |
1844 | uint32_t *opc, | |
1845 | struct kvm_run *run, | |
1846 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
1847 | { |
1848 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1849 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
e685c689 SL |
1850 | unsigned long entryhi = |
1851 | (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | |
48c4ac97 | 1852 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
e685c689 SL |
1853 | |
1854 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
1855 | /* save old pc */ | |
1856 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
1857 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
1858 | ||
1859 | if (cause & CAUSEF_BD) | |
1860 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
1861 | else | |
1862 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
1863 | ||
1864 | kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", | |
1865 | arch->pc); | |
1866 | ||
1867 | /* set pc to the exception entry point */ | |
1868 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1869 | ||
1870 | } else { | |
1871 | kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n", | |
1872 | arch->pc); | |
1873 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1874 | } | |
1875 | ||
1876 | kvm_change_c0_guest_cause(cop0, (0xff), | |
1877 | (T_TLB_LD_MISS << CAUSEB_EXCCODE)); | |
1878 | ||
1879 | /* setup badvaddr, context and entryhi registers for the guest */ | |
1880 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | |
1881 | /* XXXKYMA: is the context register used by linux??? */ | |
1882 | kvm_write_c0_guest_entryhi(cop0, entryhi); | |
1883 | /* Blow away the shadow host TLBs */ | |
1884 | kvm_mips_flush_host_tlb(1); | |
1885 | ||
d98403a5 | 1886 | return EMULATE_DONE; |
e685c689 SL |
1887 | } |
1888 | ||
d116e812 DCZ |
1889 | enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause, |
1890 | uint32_t *opc, | |
1891 | struct kvm_run *run, | |
1892 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
1893 | { |
1894 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1895 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
e685c689 | 1896 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
48c4ac97 | 1897 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
e685c689 SL |
1898 | |
1899 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
1900 | /* save old pc */ | |
1901 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
1902 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
1903 | ||
1904 | if (cause & CAUSEF_BD) | |
1905 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
1906 | else | |
1907 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
1908 | ||
1909 | kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", | |
1910 | arch->pc); | |
1911 | ||
1912 | /* Set PC to the exception entry point */ | |
1913 | arch->pc = KVM_GUEST_KSEG0 + 0x0; | |
1914 | } else { | |
1915 | kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", | |
1916 | arch->pc); | |
1917 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1918 | } | |
1919 | ||
1920 | kvm_change_c0_guest_cause(cop0, (0xff), | |
1921 | (T_TLB_ST_MISS << CAUSEB_EXCCODE)); | |
1922 | ||
1923 | /* setup badvaddr, context and entryhi registers for the guest */ | |
1924 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | |
1925 | /* XXXKYMA: is the context register used by linux??? */ | |
1926 | kvm_write_c0_guest_entryhi(cop0, entryhi); | |
1927 | /* Blow away the shadow host TLBs */ | |
1928 | kvm_mips_flush_host_tlb(1); | |
1929 | ||
d98403a5 | 1930 | return EMULATE_DONE; |
e685c689 SL |
1931 | } |
1932 | ||
d116e812 DCZ |
1933 | enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause, |
1934 | uint32_t *opc, | |
1935 | struct kvm_run *run, | |
1936 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
1937 | { |
1938 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
1939 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
e685c689 | 1940 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
48c4ac97 | 1941 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
e685c689 SL |
1942 | |
1943 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
1944 | /* save old pc */ | |
1945 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
1946 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
1947 | ||
1948 | if (cause & CAUSEF_BD) | |
1949 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
1950 | else | |
1951 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
1952 | ||
1953 | kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n", | |
1954 | arch->pc); | |
1955 | ||
1956 | /* Set PC to the exception entry point */ | |
1957 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1958 | } else { | |
1959 | kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n", | |
1960 | arch->pc); | |
1961 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
1962 | } | |
1963 | ||
1964 | kvm_change_c0_guest_cause(cop0, (0xff), | |
1965 | (T_TLB_ST_MISS << CAUSEB_EXCCODE)); | |
1966 | ||
1967 | /* setup badvaddr, context and entryhi registers for the guest */ | |
1968 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | |
1969 | /* XXXKYMA: is the context register used by linux??? */ | |
1970 | kvm_write_c0_guest_entryhi(cop0, entryhi); | |
1971 | /* Blow away the shadow host TLBs */ | |
1972 | kvm_mips_flush_host_tlb(1); | |
1973 | ||
d98403a5 | 1974 | return EMULATE_DONE; |
e685c689 SL |
1975 | } |
1976 | ||
1977 | /* TLBMOD: store into address matching TLB with Dirty bit off */ | |
d116e812 DCZ |
1978 | enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, |
1979 | struct kvm_run *run, | |
1980 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
1981 | { |
1982 | enum emulation_result er = EMULATE_DONE; | |
e685c689 | 1983 | #ifdef DEBUG |
3d654833 JH |
1984 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1985 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | |
1986 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | |
1987 | int index; | |
1988 | ||
d116e812 | 1989 | /* If address not in the guest TLB, then we are in trouble */ |
e685c689 SL |
1990 | index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); |
1991 | if (index < 0) { | |
1992 | /* XXXKYMA Invalidate and retry */ | |
1993 | kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr); | |
1994 | kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n", | |
1995 | __func__, entryhi); | |
1996 | kvm_mips_dump_guest_tlbs(vcpu); | |
1997 | kvm_mips_dump_host_tlbs(); | |
1998 | return EMULATE_FAIL; | |
1999 | } | |
2000 | #endif | |
2001 | ||
2002 | er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu); | |
2003 | return er; | |
2004 | } | |
2005 | ||
d116e812 DCZ |
2006 | enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause, |
2007 | uint32_t *opc, | |
2008 | struct kvm_run *run, | |
2009 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2010 | { |
2011 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2012 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | |
48c4ac97 | 2013 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
e685c689 | 2014 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
e685c689 SL |
2015 | |
2016 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2017 | /* save old pc */ | |
2018 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2019 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2020 | ||
2021 | if (cause & CAUSEF_BD) | |
2022 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2023 | else | |
2024 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2025 | ||
2026 | kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", | |
2027 | arch->pc); | |
2028 | ||
2029 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
2030 | } else { | |
2031 | kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", | |
2032 | arch->pc); | |
2033 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
2034 | } | |
2035 | ||
2036 | kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE)); | |
2037 | ||
2038 | /* setup badvaddr, context and entryhi registers for the guest */ | |
2039 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | |
2040 | /* XXXKYMA: is the context register used by linux??? */ | |
2041 | kvm_write_c0_guest_entryhi(cop0, entryhi); | |
2042 | /* Blow away the shadow host TLBs */ | |
2043 | kvm_mips_flush_host_tlb(1); | |
2044 | ||
d98403a5 | 2045 | return EMULATE_DONE; |
e685c689 SL |
2046 | } |
2047 | ||
d116e812 DCZ |
2048 | enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause, |
2049 | uint32_t *opc, | |
2050 | struct kvm_run *run, | |
2051 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2052 | { |
2053 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2054 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
e685c689 SL |
2055 | |
2056 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2057 | /* save old pc */ | |
2058 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2059 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2060 | ||
2061 | if (cause & CAUSEF_BD) | |
2062 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2063 | else | |
2064 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2065 | ||
2066 | } | |
2067 | ||
2068 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
2069 | ||
2070 | kvm_change_c0_guest_cause(cop0, (0xff), | |
2071 | (T_COP_UNUSABLE << CAUSEB_EXCCODE)); | |
2072 | kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); | |
2073 | ||
d98403a5 | 2074 | return EMULATE_DONE; |
e685c689 SL |
2075 | } |
2076 | ||
d116e812 DCZ |
2077 | enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause, |
2078 | uint32_t *opc, | |
2079 | struct kvm_run *run, | |
2080 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2081 | { |
2082 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2083 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2084 | enum emulation_result er = EMULATE_DONE; | |
2085 | ||
2086 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2087 | /* save old pc */ | |
2088 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2089 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2090 | ||
2091 | if (cause & CAUSEF_BD) | |
2092 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2093 | else | |
2094 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2095 | ||
2096 | kvm_debug("Delivering RI @ pc %#lx\n", arch->pc); | |
2097 | ||
2098 | kvm_change_c0_guest_cause(cop0, (0xff), | |
2099 | (T_RES_INST << CAUSEB_EXCCODE)); | |
2100 | ||
2101 | /* Set PC to the exception entry point */ | |
2102 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
2103 | ||
2104 | } else { | |
2105 | kvm_err("Trying to deliver RI when EXL is already set\n"); | |
2106 | er = EMULATE_FAIL; | |
2107 | } | |
2108 | ||
2109 | return er; | |
2110 | } | |
2111 | ||
d116e812 DCZ |
2112 | enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, |
2113 | uint32_t *opc, | |
2114 | struct kvm_run *run, | |
2115 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2116 | { |
2117 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2118 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2119 | enum emulation_result er = EMULATE_DONE; | |
2120 | ||
2121 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2122 | /* save old pc */ | |
2123 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2124 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2125 | ||
2126 | if (cause & CAUSEF_BD) | |
2127 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2128 | else | |
2129 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2130 | ||
2131 | kvm_debug("Delivering BP @ pc %#lx\n", arch->pc); | |
2132 | ||
2133 | kvm_change_c0_guest_cause(cop0, (0xff), | |
2134 | (T_BREAK << CAUSEB_EXCCODE)); | |
2135 | ||
2136 | /* Set PC to the exception entry point */ | |
2137 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
2138 | ||
2139 | } else { | |
6ad78a5c | 2140 | kvm_err("Trying to deliver BP when EXL is already set\n"); |
e685c689 SL |
2141 | er = EMULATE_FAIL; |
2142 | } | |
2143 | ||
2144 | return er; | |
2145 | } | |
2146 | ||
0a560427 JH |
2147 | enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause, |
2148 | uint32_t *opc, | |
2149 | struct kvm_run *run, | |
2150 | struct kvm_vcpu *vcpu) | |
2151 | { | |
2152 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2153 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2154 | enum emulation_result er = EMULATE_DONE; | |
2155 | ||
2156 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2157 | /* save old pc */ | |
2158 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2159 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2160 | ||
2161 | if (cause & CAUSEF_BD) | |
2162 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2163 | else | |
2164 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2165 | ||
2166 | kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc); | |
2167 | ||
2168 | kvm_change_c0_guest_cause(cop0, (0xff), | |
2169 | (T_TRAP << CAUSEB_EXCCODE)); | |
2170 | ||
2171 | /* Set PC to the exception entry point */ | |
2172 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
2173 | ||
2174 | } else { | |
2175 | kvm_err("Trying to deliver TRAP when EXL is already set\n"); | |
2176 | er = EMULATE_FAIL; | |
2177 | } | |
2178 | ||
2179 | return er; | |
2180 | } | |
2181 | ||
c2537ed9 JH |
2182 | enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause, |
2183 | uint32_t *opc, | |
2184 | struct kvm_run *run, | |
2185 | struct kvm_vcpu *vcpu) | |
2186 | { | |
2187 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2188 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2189 | enum emulation_result er = EMULATE_DONE; | |
2190 | ||
2191 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2192 | /* save old pc */ | |
2193 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2194 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2195 | ||
2196 | if (cause & CAUSEF_BD) | |
2197 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2198 | else | |
2199 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2200 | ||
2201 | kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc); | |
2202 | ||
2203 | kvm_change_c0_guest_cause(cop0, (0xff), | |
2204 | (T_MSAFPE << CAUSEB_EXCCODE)); | |
2205 | ||
2206 | /* Set PC to the exception entry point */ | |
2207 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
2208 | ||
2209 | } else { | |
2210 | kvm_err("Trying to deliver MSAFPE when EXL is already set\n"); | |
2211 | er = EMULATE_FAIL; | |
2212 | } | |
2213 | ||
2214 | return er; | |
2215 | } | |
2216 | ||
1c0cd66a JH |
2217 | enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause, |
2218 | uint32_t *opc, | |
2219 | struct kvm_run *run, | |
2220 | struct kvm_vcpu *vcpu) | |
2221 | { | |
2222 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2223 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2224 | enum emulation_result er = EMULATE_DONE; | |
2225 | ||
2226 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2227 | /* save old pc */ | |
2228 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2229 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2230 | ||
2231 | if (cause & CAUSEF_BD) | |
2232 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2233 | else | |
2234 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2235 | ||
2236 | kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc); | |
2237 | ||
2238 | kvm_change_c0_guest_cause(cop0, (0xff), | |
2239 | (T_FPE << CAUSEB_EXCCODE)); | |
2240 | ||
2241 | /* Set PC to the exception entry point */ | |
2242 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
2243 | ||
2244 | } else { | |
2245 | kvm_err("Trying to deliver FPE when EXL is already set\n"); | |
2246 | er = EMULATE_FAIL; | |
2247 | } | |
2248 | ||
2249 | return er; | |
2250 | } | |
2251 | ||
c2537ed9 JH |
2252 | enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause, |
2253 | uint32_t *opc, | |
2254 | struct kvm_run *run, | |
2255 | struct kvm_vcpu *vcpu) | |
2256 | { | |
2257 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2258 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2259 | enum emulation_result er = EMULATE_DONE; | |
2260 | ||
2261 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2262 | /* save old pc */ | |
2263 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2264 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2265 | ||
2266 | if (cause & CAUSEF_BD) | |
2267 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2268 | else | |
2269 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2270 | ||
2271 | kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc); | |
2272 | ||
2273 | kvm_change_c0_guest_cause(cop0, (0xff), | |
2274 | (T_MSADIS << CAUSEB_EXCCODE)); | |
2275 | ||
2276 | /* Set PC to the exception entry point */ | |
2277 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
2278 | ||
2279 | } else { | |
2280 | kvm_err("Trying to deliver MSADIS when EXL is already set\n"); | |
2281 | er = EMULATE_FAIL; | |
2282 | } | |
2283 | ||
2284 | return er; | |
2285 | } | |
2286 | ||
d116e812 | 2287 | /* ll/sc, rdhwr, sync emulation */ |
e685c689 SL |
2288 | |
2289 | #define OPCODE 0xfc000000 | |
2290 | #define BASE 0x03e00000 | |
2291 | #define RT 0x001f0000 | |
2292 | #define OFFSET 0x0000ffff | |
2293 | #define LL 0xc0000000 | |
2294 | #define SC 0xe0000000 | |
2295 | #define SPEC0 0x00000000 | |
2296 | #define SPEC3 0x7c000000 | |
2297 | #define RD 0x0000f800 | |
2298 | #define FUNC 0x0000003f | |
2299 | #define SYNC 0x0000000f | |
2300 | #define RDHWR 0x0000003b | |
2301 | ||
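/*
 * kvm_mips_handle_ri() below recognises RDHWR with the masks just defined:
 * the word must carry the SPEC3 major opcode and the RDHWR function code,
 * with the destination rt in bits 20:16 and the hardware register rd in
 * bits 15:11. A stand-alone sketch of that match, reusing the same mask
 * values under SK_ names:
 */
#include <stdint.h>
#include <stdio.h>

#define SK_OPCODE 0xfc000000
#define SK_RT     0x001f0000
#define SK_RD     0x0000f800
#define SK_FUNC   0x0000003f
#define SK_SPEC3  0x7c000000
#define SK_RDHWR  0x0000003b

int main(void)
{
	uint32_t inst = 0x7c03e83b;	/* rdhwr v1, $29 (TLS pointer read) */

	if ((inst & SK_OPCODE) == SK_SPEC3 && (inst & SK_FUNC) == SK_RDHWR) {
		int rd = (inst & SK_RD) >> 11;
		int rt = (inst & SK_RT) >> 16;

		printf("rdhwr: rt=%d rd=%d\n", rt, rd);	/* rt=3 rd=29 */
	}
	return 0;
}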
d116e812 DCZ |
2302 | enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, |
2303 | struct kvm_run *run, | |
2304 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2305 | { |
2306 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2307 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2308 | enum emulation_result er = EMULATE_DONE; | |
2309 | unsigned long curr_pc; | |
2310 | uint32_t inst; | |
2311 | ||
2312 | /* | |
2313 | * Update PC and hold onto current PC in case there is | |
2314 | * an error and we want to roll back the PC | |
2315 | */ | |
2316 | curr_pc = vcpu->arch.pc; | |
2317 | er = update_pc(vcpu, cause); | |
2318 | if (er == EMULATE_FAIL) | |
2319 | return er; | |
2320 | ||
d116e812 | 2321 | /* Fetch the instruction. */ |
e685c689 SL |
2322 | if (cause & CAUSEF_BD) |
2323 | opc += 1; | |
2324 | ||
2325 | inst = kvm_get_inst(opc, vcpu); | |
2326 | ||
2327 | if (inst == KVM_INVALID_INST) { | |
6ad78a5c | 2328 | kvm_err("%s: Cannot get inst @ %p\n", __func__, opc); |
e685c689 SL |
2329 | return EMULATE_FAIL; |
2330 | } | |
2331 | ||
2332 | if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) { | |
26f4f3b5 | 2333 | int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); |
e685c689 SL |
2334 | int rd = (inst & RD) >> 11; |
2335 | int rt = (inst & RT) >> 16; | |
26f4f3b5 JH |
2336 | /* If usermode, check RDHWR rd is allowed by guest HWREna */ |
2337 | if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) { | |
2338 | kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n", | |
2339 | rd, opc); | |
2340 | goto emulate_ri; | |
2341 | } | |
e685c689 SL |
2342 | switch (rd) { |
2343 | case 0: /* CPU number */ | |
2344 | arch->gprs[rt] = 0; | |
2345 | break; | |
2346 | case 1: /* SYNCI length */ | |
2347 | arch->gprs[rt] = min(current_cpu_data.dcache.linesz, | |
2348 | current_cpu_data.icache.linesz); | |
2349 | break; | |
2350 | case 2: /* Read count register */ | |
e30492bb | 2351 | arch->gprs[rt] = kvm_mips_read_count(vcpu); |
e685c689 SL |
2352 | break; |
2353 | case 3: /* Count register resolution */ | |
2354 | switch (current_cpu_data.cputype) { | |
2355 | case CPU_20KC: | |
2356 | case CPU_25KF: | |
2357 | arch->gprs[rt] = 1; | |
2358 | break; | |
2359 | default: | |
2360 | arch->gprs[rt] = 2; | |
2361 | } | |
2362 | break; | |
2363 | case 29: | |
e685c689 | 2364 | arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); |
e685c689 SL |
2365 | break; |
2366 | ||
2367 | default: | |
15505679 | 2368 | kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc); |
26f4f3b5 | 2369 | goto emulate_ri; |
e685c689 SL |
2370 | } |
2371 | } else { | |
15505679 | 2372 | kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst); |
26f4f3b5 | 2373 | goto emulate_ri; |
e685c689 SL |
2374 | } |
2375 | ||
26f4f3b5 JH |
2376 | return EMULATE_DONE; |
2377 | ||
2378 | emulate_ri: | |
e685c689 | 2379 | /* |
26f4f3b5 JH |
2380 | * Rollback PC (if in branch delay slot then the PC already points to |
2381 | * branch target), and pass the RI exception to the guest OS. | |
e685c689 | 2382 | */ |
26f4f3b5 JH |
2383 | vcpu->arch.pc = curr_pc; |
2384 | return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); | |
e685c689 SL |
2385 | } |
2386 | ||
d116e812 DCZ |
2387 | enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, |
2388 | struct kvm_run *run) | |
e685c689 SL |
2389 | { |
2390 | unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; | |
2391 | enum emulation_result er = EMULATE_DONE; | |
e685c689 SL |
2392 | |
2393 | if (run->mmio.len > sizeof(*gpr)) { | |
6ad78a5c | 2394 | kvm_err("Bad MMIO length: %d\n", run->mmio.len); |
e685c689 SL |
2395 | er = EMULATE_FAIL; |
2396 | goto done; | |
2397 | } | |
2398 | ||
e685c689 SL |
2399 | er = update_pc(vcpu, vcpu->arch.pending_load_cause); |
2400 | if (er == EMULATE_FAIL) | |
2401 | return er; | |
2402 | ||
2403 | switch (run->mmio.len) { | |
2404 | case 4: | |
2405 | *gpr = *(int32_t *) run->mmio.data; | |
2406 | break; | |
2407 | ||
2408 | case 2: | |
2409 | if (vcpu->mmio_needed == 2) | |
2410 | *gpr = *(int16_t *) run->mmio.data; | |
2411 | else | |
ed9244e6 | 2412 | *gpr = *(uint16_t *)run->mmio.data; |
e685c689 SL |
2413 | |
2414 | break; | |
2415 | case 1: | |
2416 | if (vcpu->mmio_needed == 2) | |
2417 | *gpr = *(int8_t *) run->mmio.data; | |
2418 | else | |
2419 | *gpr = *(u8 *) run->mmio.data; | |
2420 | break; | |
2421 | } | |
2422 | ||
2423 | if (vcpu->arch.pending_load_cause & CAUSEF_BD) | |
d116e812 DCZ |
2424 | kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", |
2425 | vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, | |
2426 | vcpu->mmio_needed); | |
e685c689 SL |
2427 | |
2428 | done: | |
2429 | return er; | |
2430 | } | |
2431 | ||
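/*
 * On the completion side above, run->mmio.len picks the access width and
 * mmio_needed picks the interpretation: 2 means re-read the buffer as a
 * signed type (sign-extend into the GPR), 1 as unsigned (zero-extend). A
 * stand-alone sketch of the two-byte case:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned long complete_load2(const void *data, int mmio_needed)
{
	int16_t s;
	uint16_t u;

	if (mmio_needed == 2) {			/* lh: sign-extend */
		memcpy(&s, data, sizeof(s));
		return (unsigned long)(long)s;
	}
	memcpy(&u, data, sizeof(u));		/* lhu: zero-extend */
	return u;
}

int main(void)
{
	uint8_t data[2] = { 0xfe, 0xff };	/* -2 as a little-endian s16 */

	printf("lh  -> %#lx\n", complete_load2(data, 2));
	printf("lhu -> %#lx\n", complete_load2(data, 1));
	return 0;
}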
d116e812 DCZ |
2432 | static enum emulation_result kvm_mips_emulate_exc(unsigned long cause, |
2433 | uint32_t *opc, | |
2434 | struct kvm_run *run, | |
2435 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2436 | { |
2437 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | |
2438 | struct mips_coproc *cop0 = vcpu->arch.cop0; | |
2439 | struct kvm_vcpu_arch *arch = &vcpu->arch; | |
2440 | enum emulation_result er = EMULATE_DONE; | |
2441 | ||
2442 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | |
2443 | /* save old pc */ | |
2444 | kvm_write_c0_guest_epc(cop0, arch->pc); | |
2445 | kvm_set_c0_guest_status(cop0, ST0_EXL); | |
2446 | ||
2447 | if (cause & CAUSEF_BD) | |
2448 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | |
2449 | else | |
2450 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | |
2451 | ||
2452 | kvm_change_c0_guest_cause(cop0, (0xff), | |
2453 | (exccode << CAUSEB_EXCCODE)); | |
2454 | ||
2455 | /* Set PC to the exception entry point */ | |
2456 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | |
2457 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | |
2458 | ||
2459 | kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n", | |
2460 | exccode, kvm_read_c0_guest_epc(cop0), | |
2461 | kvm_read_c0_guest_badvaddr(cop0)); | |
2462 | } else { | |
6ad78a5c | 2463 | kvm_err("Trying to deliver EXC when EXL is already set\n"); |
e685c689 SL |
2464 | er = EMULATE_FAIL; |
2465 | } | |
2466 | ||
2467 | return er; | |
2468 | } | |
2469 | ||
d116e812 DCZ |
2470 | enum emulation_result kvm_mips_check_privilege(unsigned long cause, |
2471 | uint32_t *opc, | |
2472 | struct kvm_run *run, | |
2473 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2474 | { |
2475 | enum emulation_result er = EMULATE_DONE; | |
2476 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | |
2477 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | |
2478 | ||
2479 | int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); | |
2480 | ||
2481 | if (usermode) { | |
2482 | switch (exccode) { | |
2483 | case T_INT: | |
2484 | case T_SYSCALL: | |
2485 | case T_BREAK: | |
2486 | case T_RES_INST: | |
0a560427 | 2487 | case T_TRAP: |
c2537ed9 | 2488 | case T_MSAFPE: |
1c0cd66a | 2489 | case T_FPE: |
98119ad5 | 2490 | case T_MSADIS: |
e685c689 SL |
2491 | break; |
2492 | ||
2493 | case T_COP_UNUSABLE: | |
2494 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0) | |
2495 | er = EMULATE_PRIV_FAIL; | |
2496 | break; | |
2497 | ||
2498 | case T_TLB_MOD: | |
2499 | break; | |
2500 | ||
2501 | case T_TLB_LD_MISS: | |
d116e812 DCZ |
2502 | /* |
2503 | * If we are accessing Guest kernel space, then send an | |
2504 | * address error exception to the guest | |
2505 | */ | |
e685c689 | 2506 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { |
6ad78a5c DCZ |
2507 | kvm_debug("%s: LD MISS @ %#lx\n", __func__, |
2508 | badvaddr); | |
e685c689 SL |
2509 | cause &= ~0xff; |
2510 | cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE); | |
2511 | er = EMULATE_PRIV_FAIL; | |
2512 | } | |
2513 | break; | |
2514 | ||
2515 | case T_TLB_ST_MISS: | |
d116e812 DCZ |
2516 | /* |
2517 | * If we are accessing Guest kernel space, then send an | |
2518 | * address error exception to the guest | |
2519 | */ | |
e685c689 | 2520 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { |
6ad78a5c DCZ |
2521 | kvm_debug("%s: ST MISS @ %#lx\n", __func__, |
2522 | badvaddr); | |
e685c689 SL |
2523 | cause &= ~0xff; |
2524 | cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE); | |
2525 | er = EMULATE_PRIV_FAIL; | |
2526 | } | |
2527 | break; | |
2528 | ||
2529 | case T_ADDR_ERR_ST: | |
6ad78a5c DCZ |
2530 | kvm_debug("%s: address error ST @ %#lx\n", __func__, |
2531 | badvaddr); | |
e685c689 SL |
2532 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { |
2533 | cause &= ~0xff; | |
2534 | cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE); | |
2535 | } | |
2536 | er = EMULATE_PRIV_FAIL; | |
2537 | break; | |
2538 | case T_ADDR_ERR_LD: | |
6ad78a5c DCZ |
2539 | kvm_debug("%s: address error LD @ %#lx\n", __func__, |
2540 | badvaddr); | |
e685c689 SL |
2541 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { |
2542 | cause &= ~0xff; | |
2543 | cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE); | |
2544 | } | |
2545 | er = EMULATE_PRIV_FAIL; | |
2546 | break; | |
2547 | default: | |
2548 | er = EMULATE_PRIV_FAIL; | |
2549 | break; | |
2550 | } | |
2551 | } | |
2552 | ||
d116e812 | 2553 | if (er == EMULATE_PRIV_FAIL) |
e685c689 | 2554 | kvm_mips_emulate_exc(cause, opc, run, vcpu); |
d116e812 | 2555 | |
e685c689 SL |
2556 | return er; |
2557 | } | |
2558 | ||
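/*
 * kvm_mips_check_privilege() above recovers the exception code from Cause
 * the way the architecture defines it: ExcCode occupies Cause[6:2], hence
 * the shift by CAUSEB_EXCCODE (2) and the 5-bit mask. A stand-alone sketch
 * of that extraction:
 */
#include <stdio.h>

#define SK_CAUSEB_EXCCODE 2

int main(void)
{
	unsigned long cause = 0x00000008;	/* ExcCode 2: TLB load miss */
	unsigned int exccode = (cause >> SK_CAUSEB_EXCCODE) & 0x1f;

	printf("exccode=%u\n", exccode);	/* prints exccode=2 */
	return 0;
}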
d116e812 DCZ |
2559 | /* |
2560 | * User Address (UA) fault; this could happen if: | |
e685c689 SL |
2561 | * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this |
2562 | * case we pass on the fault to the guest kernel and let it handle it. | |
2563 | * (2) TLB entry is present in the Guest TLB but not in the shadow, in this | |
2564 | * case we inject the TLB from the Guest TLB into the shadow host TLB | |
2565 | */ | |
d116e812 DCZ |
2566 | enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause, |
2567 | uint32_t *opc, | |
2568 | struct kvm_run *run, | |
2569 | struct kvm_vcpu *vcpu) | |
e685c689 SL |
2570 | { |
2571 | enum emulation_result er = EMULATE_DONE; | |
2572 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | |
2573 | unsigned long va = vcpu->arch.host_cp0_badvaddr; | |
2574 | int index; | |
2575 | ||
2576 | kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n", | |
2577 | vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi); | |
2578 | ||
d116e812 DCZ |
2579 | /* |
2580 | * KVM would not have got the exception if this entry was valid in the | |
2581 | * shadow host TLB. Check the Guest TLB; if the entry is not there, then | |
2582 | * send the guest an exception. The guest exc handler should then inject | |
2583 | * an entry into the guest TLB. | |
e685c689 SL |
2584 | */ |
2585 | index = kvm_mips_guest_tlb_lookup(vcpu, | |
2586 | (va & VPN2_MASK) | | |
48c4ac97 DD |
2587 | (kvm_read_c0_guest_entryhi |
2588 | (vcpu->arch.cop0) & ASID_MASK)); | |
e685c689 SL |
2589 | if (index < 0) { |
2590 | if (exccode == T_TLB_LD_MISS) { | |
2591 | er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); | |
2592 | } else if (exccode == T_TLB_ST_MISS) { | |
2593 | er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu); | |
2594 | } else { | |
6ad78a5c DCZ |
2595 | kvm_err("%s: invalid exc code: %d\n", __func__, |
2596 | exccode); | |
e685c689 SL |
2597 | er = EMULATE_FAIL; |
2598 | } | |
2599 | } else { | |
2600 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; | |
2601 | ||
d116e812 DCZ |
2602 | /* |
2603 | * Check if the entry is valid, if not then setup a TLB invalid | |
2604 | * exception to the guest | |
2605 | */ | |
e685c689 SL |
2606 | if (!TLB_IS_VALID(*tlb, va)) { |
2607 | if (exccode == T_TLB_LD_MISS) { | |
2608 | er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, | |
2609 | vcpu); | |
2610 | } else if (exccode == T_TLB_ST_MISS) { | |
2611 | er = kvm_mips_emulate_tlbinv_st(cause, opc, run, | |
2612 | vcpu); | |
2613 | } else { | |
6ad78a5c DCZ |
2614 | kvm_err("%s: invalid exc code: %d\n", __func__, |
2615 | exccode); | |
e685c689 SL |
2616 | er = EMULATE_FAIL; |
2617 | } | |
2618 | } else { | |
d116e812 DCZ |
2619 | kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", |
2620 | tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1); | |
2621 | /* | |
2622 | * OK we have a Guest TLB entry, now inject it into the | |
2623 | * shadow host TLB | |
2624 | */ | |
e685c689 SL |
2625 | kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, |
2626 | NULL); | |
2627 | } | |
2628 | } | |
2629 | ||
2630 | return er; | |
2631 | } |