MIPS: KVM: Move non-TLB handling code out of tlb.c
arch/mips/kvm/mmu.c (deliverable/linux.git)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/kvm_host.h>
#include <asm/mmu_context.h>

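/* Hardware ASIDs currently assigned to guest kernel/user mode on this CPU */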
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

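/*
 * Lazily populate the guest physical page map: look up the host pfn backing
 * @gfn the first time it is touched, under SRCU read-side protection.
 */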
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);

	if (kvm_mips_is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	unsigned long offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
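	/*
	 * A host TLB entry maps an even/odd pair of adjacent virtual pages,
	 * so map both gfn and its neighbour (gfn ^ 1), and order the pair
	 * according to whether the faulting page is the even or odd half.
	 */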
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

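	/*
	 * Build the host EntryLo values: the host PFN, cache coherency
	 * attribute 3 (cacheable), the dirty (D) bit and the valid (V) bit.
	 */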
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (1 << 2) | (0x1 << 1);

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}

int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long *hpa0,
					 unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	int ret;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
				      >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
				      >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
					    >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
					    >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
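	/*
	 * The host PFN and cache coherency attribute are set here, while the
	 * dirty (D) and valid (V) bits are carried over from the guest TLB
	 * entry.
	 */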
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);

	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}

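/*
 * Allocate a new ASID for a guest MMU context from the per-CPU ASID cache.
 * When the ASID space wraps to a new version, the VTag icache (if present)
 * and the local TLB are flushed to start a fresh ASID cycle.
 */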
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += cpu_asid_inc();
	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = asid_first_version(cpu);
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* Allocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

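	/*
	 * The guest kernel and user ASIDs are stale when their version bits
	 * no longer match the CPU's current ASID cache (e.g. after an ASID
	 * generation rollover or migration to another CPU); in that case
	 * both are reallocated.
	 */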
	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
						asid_version_mask(cpu)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
			vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
			vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			  vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID.
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi & asid_mask);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so, the pre-empted ASID is no
		 * longer valid; set it to what it should be based on the mode
		 * of the Guest (Kernel/User).
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
						 asid_mask);
			else
				write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
						 asid_mask);
			ehb();
		}
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

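	/*
	 * Restore the host task's ASID into EntryHi, dropping the task's MMU
	 * context first if its ASID is from a stale version.
	 */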
	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu))) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}

u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	u32 inst;
	int index;

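	/*
	 * Fetch the instruction at @opc from guest memory: mapped addresses
	 * (useg or KSEG2/3) are resolved via the host and guest TLBs, KSEG0
	 * addresses are translated directly to a host physical address, and
	 * anything else is rejected.
	 */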
	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = (unsigned long) opc & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) &
						KVM_ENTRYHI_ASID;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
						&vcpu->arch.guest_tlb[index],
						NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							(unsigned long) opc);
		inst = *(u32 *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}