arch/mips/kvm/tlb.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB	0
#define KVM_GUEST_SP_TLB	1

atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);

/* These function pointers are initialized once the KVM module is loaded */
kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn);

void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean);

bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);

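/**
 * kvm_mips_get_kernel_asid() - Guest kernel ASID for the current CPU.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	The host ASID allocated to guest kernel mode on this CPU,
 *		masked to the hardware ASID field.
 */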
u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

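/**
 * kvm_mips_get_user_asid() - Guest user ASID for the current CPU.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	The host ASID allocated to guest user mode on this CPU,
 *		masked to the hardware ASID field.
 */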
u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

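/**
 * kvm_mips_get_commpage_asid() - Host TLB index reserved for the commpage.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	The host TLB index used for the guest commpage mapping (it is
 *		written to CP0_Index in kvm_mips_handle_commpage_tlb_fault()).
 */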
inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}

/* Structure defining a TLB entry data set. */

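/**
 * kvm_mips_dump_host_tlbs() - Dump all host TLB entries to the kernel log.
 *
 * Read back every host TLB entry and log its EntryHi, EntryLo0/1 and PageMask,
 * restoring the original EntryHi and PageMask afterwards. Runs with interrupts
 * disabled.
 */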
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	kvm_info("HOST TLBs:\n");
	kvm_info("ASID: %#lx\n", read_c0_entryhi() &
		 cpu_asid_mask(&current_cpu_data));

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

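/**
 * kvm_mips_dump_guest_tlbs() - Dump the software guest TLB to the kernel log.
 * @vcpu:	Virtual CPU whose guest TLB array is dumped.
 */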
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
			 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo0 >> 3) & 7);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
			 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
			 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
			 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

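/**
 * kvm_mips_map_page() - Map a guest frame number to a host pfn.
 * @kvm:	KVM pointer.
 * @gfn:	Guest frame number.
 *
 * Look up the host pfn backing @gfn via kvm_mips_gfn_to_pfn() and cache it in
 * the guest_pmap table, unless it is already mapped.
 *
 * Returns:	0 on success, -EFAULT if no valid pfn could be obtained.
 */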
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	unsigned long offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
EXPORT_SYMBOL_GPL(kvm_mips_translate_guest_kseg0_to_hpa);

/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
			    unsigned long entrylo0, unsigned long entrylo1,
			    int flush_dcache_mask)
{
	unsigned long flags;
	unsigned long old_entryhi;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx > current_cpu_data.tlbsize) {
		kvm_err("%s: Invalid Index: %d\n", __func__, idx);
		kvm_mips_dump_host_tlbs();
		local_irq_restore(flags);
		return -1;
	}

	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();

	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, idx, read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Flush D-cache */
	if (flush_dcache_mask) {
		if (entrylo0 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page((entryhi & VPN2_MASK) &
					      ~flush_dcache_mask);
		}
		if (entrylo1 & MIPS3_PG_V) {
			++vcpu->stat.flush_dcache_exits;
			flush_data_cache_page(((entryhi & VPN2_MASK) &
					       ~flush_dcache_mask) |
					      (0x1 << PAGE_SHIFT));
		}
	}

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);
	return 0;
}

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);

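/**
 * kvm_mips_handle_commpage_tlb_fault() - Map the guest commpage.
 * @badvaddr:	Faulting guest virtual address.
 * @vcpu:	Virtual CPU.
 *
 * Write a host TLB entry at the reserved commpage index, mapping the page
 * containing @badvaddr to the kseg0 commpage allocated for this VCPU.
 *
 * Returns:	0 on success.
 */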
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);

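/**
 * kvm_mips_handle_mapped_seg_tlb_fault() - Handle a fault in a mapped segment.
 * @vcpu:	Virtual CPU.
 * @tlb:	Guest TLB entry that translates the faulting address.
 * @hpa0:	Optional output for the even page's host physical address.
 * @hpa1:	Optional output for the odd page's host physical address.
 *
 * Map the pair of guest physical pages referenced by @tlb and write a host
 * TLB entry for them, combining the host pfns with the dirty and valid bits
 * taken from the guest entry.
 *
 * Returns:	0 on success, -1 if a page could not be mapped or the host TLB
 *		write failed.
 */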
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	int ret;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
				      >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
				      >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);

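/**
 * kvm_mips_guest_tlb_lookup() - Search the guest TLB for a matching entry.
 * @vcpu:	Virtual CPU.
 * @entryhi:	EntryHi value (VPN2 | ASID) to look up.
 *
 * Returns:	Index of the matching guest TLB entry, or -1 if none matches.
 */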
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/* Only dereference the guest TLB array if an entry was found */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo0, tlb[index].tlb_lo1);
	else
		kvm_debug("%s: entryhi: %#lx, index: %d\n",
			  __func__, entryhi, index);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);

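/**
 * kvm_mips_host_tlb_lookup() - Probe the host TLB for a guest virtual address.
 * @vcpu:	Virtual CPU.
 * @vaddr:	Guest virtual address.
 *
 * Probe with the guest kernel or user ASID, depending on the current guest
 * mode, and restore EntryHi afterwards.
 *
 * Returns:	The host TLB index of the matching entry, or a negative value
 *		if there is no match.
 */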
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_kernel_asid(vcpu));
	else {
		write_c0_entryhi((vaddr & VPN2_MASK) |
				 kvm_mips_get_user_asid(vcpu));
	}

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

	return idx;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);

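/**
 * kvm_mips_host_tlb_inv() - Invalidate the host TLB entry for a guest address.
 * @vcpu:	Virtual CPU.
 * @va:		Guest virtual address, looked up with the guest user ASID.
 *
 * If the probe finds a matching entry, overwrite it with a unique EntryHi and
 * zeroed EntryLo values so it can no longer hit.
 *
 * Returns:	0 always.
 */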
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	/* Invalidate the matching entry, including a match at index 0 */
	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();

		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();

		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	if (idx >= 0)
		kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
			  (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

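/**
 * kvm_mips_flush_host_tlb() - Invalidate host TLB entries.
 * @skip_kseg0:	If non-zero, preserve entries whose EntryHi lies in guest
 *		KSEG0.
 *
 * Rewrite every (non-preserved) host TLB entry with a unique EntryHi and
 * zeroed EntryLo values, then restore the original EntryHi and PageMask.
 */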
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);

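/**
 * kvm_get_new_mmu_context() - Allocate a new ASID for @mm on @cpu.
 * @mm:		mm_struct that receives the new ASID.
 * @cpu:	CPU on which the ASID is allocated.
 * @vcpu:	Virtual CPU (unused here).
 *
 * Bump the per-CPU ASID cache; when the ASID space wraps, flush the VTag
 * I-cache if present and the local TLB to start a new ASID cycle.
 */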
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += cpu_asid_inc();
	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = asid_first_version(cpu);
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

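/**
 * kvm_local_flush_tlb_all() - Flush the entire TLB of the local CPU.
 *
 * Invalidate every TLB entry by writing a unique EntryHi and zeroed EntryLo
 * values to each index, preserving the original EntryHi.
 */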
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
						asid_version_mask(cpu)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.
					 preempt_entryhi & asid_mask);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so then the pre-empted ASID is
		 * no longer valid, we need to set it to what it should be based
		 * on the mode of the Guest (Kernel/User)
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 asid_mask);
			else
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 asid_mask);
			ehb();
		}
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);

}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu))) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_put);

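/**
 * kvm_get_inst() - Fetch an instruction from guest virtual memory.
 * @opc:	Guest virtual address of the instruction.
 * @vcpu:	Virtual CPU.
 *
 * For mapped guest addresses, use the host TLB (and on a miss the guest TLB)
 * to make the page accessible before reading it. Guest KSEG0 addresses are
 * translated to a host physical address and read through CKSEG0.
 *
 * Returns:	The instruction word, or KVM_INVALID_INST if the address cannot
 *		be translated.
 */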
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	u32 inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = (unsigned long) opc & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) &
						KVM_ENTRYHI_ASID;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
							     &vcpu->arch.
							     guest_tlb[index],
							     NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
		paddr =
		    kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							  (unsigned long) opc);
		inst = *(u32 *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
EXPORT_SYMBOL_GPL(kvm_get_inst);