/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/kvm_host.h>
#include <asm/mmu_context.h>
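/*
 * Note: the guest runs under two host ASIDs per physical CPU, one for guest
 * kernel mode and one for guest user mode. The helpers below return the
 * current CPU's ASID with the generation (version) bits masked off, ready to
 * be written into the low bits of CP0_EntryHi.
 */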
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}
static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}
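/*
 * kvm->arch.guest_pmap[] is a flat gfn -> host pfn table, populated lazily
 * here on first use of each guest page. The SRCU read lock protects the
 * memslot lookup done by kvm_mips_gfn_to_pfn() against concurrent memslot
 * updates.
 */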
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}
/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	unsigned long offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
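/*
 * Note: a MIPS TLB entry always maps an even/odd pair of virtual pages
 * (EntryLo0 for the even page, EntryLo1 for the odd one), so the handler
 * below maps both gfn and gfn ^ 0x1 and aligns the faulting address down to
 * the pair boundary with PAGE_MASK << 1.
 */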
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}
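/*
 * Note: in the EntryLo values built by these fault handlers, (0x1 << 1) is
 * the V (valid) bit, (1 << 2) is the D (dirty, i.e. writable) bit, and
 * (0x3 << 3) sets the cache coherency attribute to 3 (cacheable,
 * noncoherent). The KSEG0 handler above sets V and D unconditionally; the
 * handler below inherits them from the guest's own TLB entry.
 */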
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	int ret;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					   >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					   >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}
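/*
 * ASID allocation follows the standard MIPS Linux scheme: asid_cache(cpu)
 * holds a generation ("version") count in its upper bits and the most
 * recently allocated ASID in its lower bits. When the ASID field wraps, the
 * TLB is flushed and a new generation begins, invalidating all previously
 * handed-out ASIDs at once.
 */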
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += cpu_asid_inc();
	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = asid_first_version(cpu);
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}
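/*
 * Note: the "(x ^ asid_cache(cpu)) & asid_version_mask(cpu)" tests in
 * kvm_arch_vcpu_load() and kvm_arch_vcpu_put() below compare only the
 * generation bits: a mismatch means the recorded ASID was allocated in an
 * older generation and must not be reused.
 */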
/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
						asid_version_mask(cpu)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.
					 preempt_entryhi & asid_mask);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so then the pre-empted ASID is
		 * no longer valid, we need to set it to what it should be based
		 * on the mode of the Guest (Kernel/User)
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 asid_mask);
			else
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 asid_mask);
			ehb();
		}
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);
}
/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu))) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
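/*
 * Fetch an instruction on behalf of the guest. Mapped guest addresses
 * (useg below KSEG0, and kseg2/kseg3) must be present in the host TLB
 * before dereferencing, so they are looked up and, if necessary, faulted in
 * via the guest TLB. Guest KSEG0 is unmapped, so it can be translated
 * straight to a host physical address and read through CKSEG0ADDR().
 */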
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	u32 inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = (unsigned long) opc & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) &
						KVM_ENTRYHI_ASID;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
							     &vcpu->arch.
							     guest_tlb[index],
							     NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
		paddr =
		    kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							  (unsigned long) opc);
		inst = *(u32 *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}