/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace.h"
#include "timing.h"
#include "e500_mmu_host.h"

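/*
 * Host TLB1 is shared: the low entries (below tlbcam_index) hold host
 * mappings, one entry is reserved for the magic page, and shadow entries
 * are allocated from the top of the array downward. This macro reflects
 * a shadow entry index into a hardware entry number.
 */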
#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for magic page */
	return host_tlb_params[1].entries - tlbcam_index - 1;
}

static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
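		/*
		 * In MAS3, each user permission bit (UR/UW/UX) sits one bit
		 * position above its supervisor counterpart (SR/SW/SX), so
		 * shifting the supervisor bits left by one yields the
		 * matching user permissions.
		 */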
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}
	mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
	return mas3;
}

static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
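	/* On SMP, force M (memory coherence required), presumably so shadow
	 * mappings stay coherent when the vcpu migrates between cores. */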
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * writing shadow tlb entry to host TLB
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
				     uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_MAS8, stlbe->mas8);
#endif
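	/* isync orders the MAS register updates above before tlbwe
	 * consumes them. */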
	asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
	/* Must clear mas8 for other host tlbwe's */
	mtspr(SPRN_MAS8, 0);
	isync();
#endif
	local_irq_restore(flags);

	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
				      stlbe->mas2, stlbe->mas7_3);
}

/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB. Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
	unsigned long flags;
	u32 mas0;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, 0);
	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
	mas0 = mfspr(SPRN_MAS0);
	local_irq_restore(flags);

	return mas0;
}

/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
	u32 mas0;

	if (tlbsel == 0) {
		mas0 = get_host_mas0(stlbe->mas2);
		__write_host_tlbe(stlbe, mas0);
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(sesel)));
	}
}

/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
			struct kvm_book3e_206_tlb_entry *gtlbe,
			struct kvm_book3e_206_tlb_entry *stlbe,
			int stlbsel, int sesel)
{
	int stid;

	preempt_disable();
	stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

	stlbe->mas1 |= MAS1_TID(stid);
	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
	preempt_enable();
}

#ifdef CONFIG_KVM_E500V2
/* XXX should be a hook in the gva2hpa translation */
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas8 = 0;

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
	preempt_enable();
}
#endif

void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
			 int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;

	/* Don't bother with unmapped entries */
	if (!(ref->flags & E500_TLB_VALID)) {
		WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
		     "%s: flags %x\n", __func__, ref->flags);
		WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
	}

	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
		int hw_tlb_indx;
		unsigned long flags;

		local_irq_save(flags);
		while (tmp) {
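			/* tmp & -tmp isolates the lowest set bit: the index
			 * of the next host TLB1 entry backing this guest
			 * entry. */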
			hw_tlb_indx = __ilog2_u64(tmp & -tmp);
			mtspr(SPRN_MAS0,
			      MAS0_TLBSEL(1) |
			      MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
			mtspr(SPRN_MAS1, 0);
			asm volatile("tlbwe");
			vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
			tmp &= tmp - 1;
		}
		mb();
		vcpu_e500->g2h_tlb1_map[esel] = 0;
		ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
		local_irq_restore(flags);
	}

	if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
		/*
		 * TLB1 entry is backed by 4k pages. This should happen
		 * rarely and is not worth optimizing. Invalidate everything.
		 */
		kvmppc_e500_tlbil_all(vcpu_e500);
		ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
	}

	/* Already invalidated in between */
	if (!(ref->flags & E500_TLB_VALID))
		return;

	/* Guest tlbe is backed by at most one host tlbe per shadow pid. */
	kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);

	/* Mark the TLB as not backed by the host anymore */
	ref->flags &= ~E500_TLB_VALID;
}

static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}

static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
					 struct kvm_book3e_206_tlb_entry *gtlbe,
					 pfn_t pfn)
{
	ref->pfn = pfn;
	ref->flags |= E500_TLB_VALID;

	/* Mark the page accessed */
	kvm_set_pfn_accessed(pfn);

	if (tlbe_is_writable(gtlbe))
		kvm_set_pfn_dirty(pfn);
}

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
	if (ref->flags & E500_TLB_VALID) {
		/* FIXME: don't log bogus pfn for TLB1 */
		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
		ref->flags = 0;
	}
}

static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	if (vcpu_e500->g2h_tlb1_map)
		memset(vcpu_e500->g2h_tlb1_map, 0,
		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
	if (vcpu_e500->h2g_tlb1_rmap)
		memset(vcpu_e500->h2g_tlb1_rmap, 0,
		       sizeof(unsigned int) * host_tlb_params[1].entries);
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int tlbsel;
	int i;

	for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
			struct tlbe_ref *ref =
				&vcpu_e500->gtlb_priv[tlbsel][i].ref;
			kvmppc_e500_ref_release(ref);
		}
	}
}

void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	kvmppc_e500_tlbil_all(vcpu_e500);
	clear_tlb_privs(vcpu_e500);
	clear_tlb1_bitmap(vcpu_e500);
}

/* TID must be supplied by the caller */
static void kvmppc_e500_setup_stlbe(
	struct kvm_vcpu *vcpu,
	struct kvm_book3e_206_tlb_entry *gtlbe,
	int tsize, struct tlbe_ref *ref, u64 gvaddr,
	struct kvm_book3e_206_tlb_entry *stlbe)
{
	pfn_t pfn = ref->pfn;
	u32 pr = vcpu->arch.shared->msr & MSR_PR;

	BUG_ON(!(ref->flags & E500_TLB_VALID));

	/* Force IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN) |
		      e500_shadow_mas2_attrib(gtlbe->mas2, pr);
	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
			e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);

#ifdef CONFIG_KVM_BOOKE_HV
	stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
#endif
}

static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
	struct tlbe_ref *ref)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn = 0; /* silence GCC warning */
	unsigned long hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far. Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management. Find the overlap between the
			 * vma and the memslot.
			 */

			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */

			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end, tsize_pages;
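				/*
				 * tsize is log2 of the page size in KB
				 * (BOOK3E_PAGESZ_4K == 2), so one tsize
				 * page covers 1 << (tsize - 2) 4K frames.
				 */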
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		} else if (vma && hva >= vma->vm_start &&
			   (vma->vm_flags & VM_HUGETLB)) {
			unsigned long psize = vma_kernel_pagesize(vma);

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * Take the largest page size that satisfies both host
			 * and guest mapping
			 */
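			/* __ilog2(psize) - 10 converts a size in bytes to
			 * tsize units (log2 of the size in KB). */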
			tsize = min(__ilog2(psize) - 10, tsize);

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
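		/* One tsize page spans 2^(tsize + 10) bytes, i.e.
		 * 1 << (tsize + 10 - PAGE_SHIFT) host pages. */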
		unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
		pfn = gfn_to_pfn_memslot(slot, gfn);
		if (is_error_noslot_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
			       (long)gfn);
			return -EINVAL;
		}

		/* Align guest and physical address to page map boundaries */
		pfn &= ~(tsize_pages - 1);
		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
	}

	kvmppc_e500_ref_setup(ref, gtlbe, pfn);

	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
				ref, gvaddr, stlbe);

	/* Clear i-cache for new pages */
	kvmppc_mmu_flush_icache(pfn);

	/* Drop refcount on page, so that mmu notifiers can clear it */
	kvm_release_pfn_clean(pfn);

	return 0;
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
				struct kvm_book3e_206_tlb_entry *stlbe)
{
	struct kvm_book3e_206_tlb_entry *gtlbe;
	struct tlbe_ref *ref;
	int stlbsel = 0;
	int sesel = 0;
	int r;

	gtlbe = get_entry(vcpu_e500, 0, esel);
	ref = &vcpu_e500->gtlb_priv[0][esel].ref;

	r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
				   get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
				   gtlbe, 0, stlbe, ref);
	if (r)
		return r;

	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);

	return 0;
}

static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
				     struct tlbe_ref *ref,
				     int esel)
{
	unsigned int sesel = vcpu_e500->host_tlb1_nv++;

	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
		vcpu_e500->host_tlb1_nv = 0;

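	/*
	 * h2g_tlb1_rmap[] stores esel + 1, so zero means "no guest mapping".
	 * If the round-robin victim slot is still in use, clear its bit in
	 * the previous owner's g2h bitmap before reusing it.
	 */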
	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
	}

	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
	vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
	WARN_ON(!(ref->flags & E500_TLB_VALID));

	return sesel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* For both one-one and one-to-many */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
	int sesel;
	int r;

	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
				   ref);
	if (r)
		return r;

	/* Use TLB0 when we can only map a page with 4k */
	if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
		vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
		write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
		return 0;
	}

	/* Otherwise map into TLB1 */
	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);

	return 0;
}

void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
		    unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	switch (tlbsel) {
	case 0:
		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

		/* Triggers after clear_tlb_privs or on initial mapping */
		if (!(priv->ref.flags & E500_TLB_VALID)) {
			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
		} else {
			kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
						&priv->ref, eaddr, &stlbe);
			write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
		}
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;
		kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
				     esel);
		break;
	}

	default:
		BUG();
		break;
	}
}

/************* MMU Notifiers *************/

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere. This is slow, but
	 * we are 100% sure that we catch the to be unmapped page
	 */
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* kvm_unmap_hva flushes everything anyways */
	kvm_unmap_hva(kvm, start);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
}

/*****************************************/

int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	/*
	 * This should never happen on real e500 hardware, but is
	 * architecturally possible -- e.g. in some weird nested
	 * virtualization case.
	 */
	if (host_tlb_params[0].entries == 0 ||
	    host_tlb_params[1].entries == 0) {
		pr_err("%s: need to know host tlb size\n", __func__);
		return -ENODEV;
	}

	host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
				  TLBnCFG_ASSOC_SHIFT;
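	/* TLB1 is fully associative: a single set whose ways == entries. */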
	host_tlb_params[1].ways = host_tlb_params[1].entries;

	if (!is_power_of_2(host_tlb_params[0].entries) ||
	    !is_power_of_2(host_tlb_params[0].ways) ||
	    host_tlb_params[0].entries < host_tlb_params[0].ways ||
	    host_tlb_params[0].ways == 0) {
		pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
		       __func__, host_tlb_params[0].entries,
		       host_tlb_params[0].ways);
		return -ENODEV;
	}

	host_tlb_params[0].sets =
		host_tlb_params[0].entries / host_tlb_params[0].ways;
	host_tlb_params[1].sets = 1;

	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
					   host_tlb_params[1].entries,
					   GFP_KERNEL);
	if (!vcpu_e500->h2g_tlb1_rmap)
		return -EINVAL;

	return 0;
}

void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->h2g_tlb1_rmap);
}