/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash32.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while (0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while (0)
#endif

#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif

static ulong htab;
static u32 htabmask;

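/*
 * Drop a shadow PTE from the host hash table and flush the mapping
 * from the TLB.  Clearing the first PTE word removes the valid bit;
 * the sync orders that store before the tlbie, and tlbsync waits for
 * the invalidation to complete.
 */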
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        volatile u32 *pteg;

        /* Remove from host HTAB */
        pteg = (u32 *)pte->slot;
        pteg[0] = 0;

        /* And make sure it's gone from the TLB too */
        asm volatile ("sync");
        asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
        asm volatile ("sync");
        asm volatile ("tlbsync");
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
        return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

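/*
 * Look up the host VSID for a guest VSID.  An entry can live in one of
 * two slots, its hash index or the mirrored index, matching the
 * two-way placement done by create_sid_map() below.
 */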
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        map = &to_book3s(vcpu)->sid_map[sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                           gvsid, map->host_vsid);
                return map;
        }

        map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                           gvsid, map->host_vsid);
                return map;
        }

        dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
        return NULL;
}

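/*
 * Compute the host PTEG address for (vsid, eaddr).  The classic 32-bit
 * hashed page table uses hash = vsid XOR page index for the primary
 * PTEG and its complement for the secondary one; the << 6 turns the
 * PTEG number into a byte offset (each PTEG is 8 PTEs of 8 bytes).
 */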
static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
                                bool primary)
{
        u32 page, hash;
        ulong pteg = htab;

        page = (eaddr & ~ESID_MASK) >> 12;

        hash = ((vsid ^ page) << 6);
        if (!primary)
                hash = ~hash;

        hash &= htabmask;

        pteg |= hash;

        dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
                    htab, hash, htabmask, pteg);

        return (u32 *)pteg;
}

extern char etext[];

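/*
 * Map one guest page: resolve the guest physical address to a host
 * page, pick a free slot in the host hash table (evicting an entry
 * once both the primary and secondary PTEG are full), install the
 * shadow PTE and remember it in the hpte cache so it can be
 * invalidated later.
 */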
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
                        bool iswrite)
{
        pfn_t hpaddr;
        u64 vpn;
        u64 vsid;
        struct kvmppc_sid_map *map;
        volatile u32 *pteg;
        u32 eaddr = orig_pte->eaddr;
        u32 pteg0, pteg1;
        register int rr = 0;
        bool primary = false;
        bool evict = false;
        struct hpte_cache *pte;
        int r = 0;
        bool writable;

        /* Get host physical address for gpa */
        hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
        if (is_error_noslot_pfn(hpaddr)) {
                printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
                       orig_pte->raddr);
                r = -EINVAL;
                goto out;
        }
        hpaddr <<= PAGE_SHIFT;

        /* and write the mapping ea -> hpa into the pt */
        vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
        map = find_sid_vsid(vcpu, vsid);
        if (!map) {
                kvmppc_mmu_map_segment(vcpu, eaddr);
                map = find_sid_vsid(vcpu, vsid);
        }
        BUG_ON(!map);

        vsid = map->host_vsid;
        vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
                ((eaddr & ~ESID_MASK) >> VPN_SHIFT);
next_pteg:
        if (rr == 16) {
                primary = !primary;
                evict = true;
                rr = 0;
        }

        pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);

        /* not evicting yet */
        if (!evict && (pteg[rr] & PTE_V)) {
                rr += 2;
                goto next_pteg;
        }

        dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

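        /*
         * Build the two PTE words: word 0 holds the valid bit, the
         * VSID, the hash function selector (PTE_SEC for the secondary
         * PTEG) and the abbreviated page index; word 1 holds the real
         * page number plus the memory-coherence, referenced and
         * changed bits.
         */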
        pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
                (primary ? 0 : PTE_SEC);
        pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

        if (orig_pte->may_write && writable) {
                pteg1 |= PP_RWRW;
                mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
        } else {
                pteg1 |= PP_RWRX;
        }

        if (orig_pte->may_execute)
                kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);

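        /*
         * Install the entry with interrupts off (this code is UP only,
         * see the CONFIG_SMP #error above).  Invalidate any old entry
         * first, write the data word, and only then the word carrying
         * the valid bit, with syncs in between, so the hardware
         * table walker never sees a half-written PTE.
         */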
        local_irq_disable();

        if (pteg[rr]) {
                pteg[rr] = 0;
                asm volatile ("sync");
        }
        pteg[rr + 1] = pteg1;
        pteg[rr] = pteg0;
        asm volatile ("sync");

        local_irq_enable();

        dprintk_mmu("KVM: new PTEG: %p\n", pteg);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

        /* Now tell our Shadow PTE code about the new page */

        pte = kvmppc_mmu_hpte_cache_next(vcpu);
        if (!pte) {
                kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
                r = -EAGAIN;
                goto out;
        }

        dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
                    orig_pte->may_write ? 'w' : '-',
                    orig_pte->may_execute ? 'x' : '-',
                    orig_pte->eaddr, (ulong)pteg, vpn,
                    orig_pte->vpage, hpaddr);

        pte->slot = (ulong)&pteg[rr];
        pte->host_vpn = vpn;
        pte->pte = *orig_pte;
        pte->pfn = hpaddr >> PAGE_SHIFT;

        kvmppc_mmu_hpte_cache_map(vcpu, pte);

        kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
out:
        return r;
}

void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
}

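/*
 * Allocate a host VSID for a guest VSID that has no mapping yet.  When
 * the pool runs dry, all shadow state is flushed and the pool restarts
 * from the beginning.
 */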
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u16 sid_map_mask;
        static int backwards_map = 0;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        /* Guest VSIDs can collide on the same hash slot; alternate between
           the slot and its mirror so two colliding entries can coexist */

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        if (backwards_map)
                sid_map_mask = SID_MAP_MASK - sid_map_mask;

        map = &to_book3s(vcpu)->sid_map[sid_map_mask];

        /* Make sure we're taking the other map next time */
        backwards_map = !backwards_map;

        /* Uh-oh ... out of mappings. Let's flush! */
        if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
                vcpu_book3s->vsid_next = 0;
                memset(vcpu_book3s->sid_map, 0,
                       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                kvmppc_mmu_flush_segments(vcpu);
        }
        map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
        vcpu_book3s->vsid_next++;

        map->guest_vsid = gvsid;
        map->valid = true;

        return map;
}

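/*
 * Resolve the guest ESID for eaddr to a host VSID and load it into the
 * shadow segment register, creating a new mapping if necessary.
 */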
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
        u32 esid = eaddr >> SID_SHIFT;
        u64 gvsid;
        u32 sr;
        struct kvmppc_sid_map *map;
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        int r = 0;

        if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
                /* Invalidate an entry */
                svcpu->sr[esid] = SR_INVALID;
                r = -ENOENT;
                goto out;
        }

        map = find_sid_vsid(vcpu, gvsid);
        if (!map)
                map = create_sid_map(vcpu, gvsid);

        map->guest_esid = esid;
        sr = map->host_vsid | SR_KP;
        svcpu->sr[esid] = sr;

        dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

out:
        svcpu_put(svcpu);
        return r;
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

        dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
        for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
                svcpu->sr[i] = SR_INVALID;

        svcpu_put(svcpu);
}

void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
        int i;

        kvmppc_mmu_hpte_destroy(vcpu);
        preempt_disable();
        for (i = 0; i < SID_CONTEXTS; i++)
                __destroy_context(to_book3s(vcpu)->context_id[i]);
        preempt_enable();
}

/* From mm/mmu_context_hash32.c */
#define CTX_TO_VSID(c, id)	((((c) * (897 * 16)) + ((id) * 0x111)) & 0xffffff)

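/*
 * Set up the per-vcpu MMU state: allocate SID_CONTEXTS host MMU
 * contexts, derive a pool of host VSIDs from them (16 per context, one
 * per segment register), and read SDR1 to locate the host hash table.
 */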
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int err;
        ulong sdr1;
        int i;
        int j;

        for (i = 0; i < SID_CONTEXTS; i++) {
                err = __init_new_context();
                if (err < 0)
                        goto init_fail;
                vcpu3s->context_id[i] = err;

                /* Remember context id for this combination */
                for (j = 0; j < 16; j++)
                        vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
        }

        vcpu3s->vsid_next = 0;

        /* Remember where the HTAB is */
        asm ( "mfsdr1 %0" : "=r"(sdr1) );
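        /*
         * SDR1: the upper 16 bits are HTABORG (the physical base of
         * the hash table), the low 9 bits are HTABMASK.  0xFFC0 covers
         * the 10 hash bits of the minimum 64KB table (after the << 6
         * PTEG shift); the HTABMASK bits enable additional hash bits
         * for larger tables.
         */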
        htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
        htab = (ulong)__va(sdr1 & 0xffff0000);

        kvmppc_mmu_hpte_init(vcpu);

        return 0;

init_fail:
        for (j = 0; j < i; j++) {
                if (!vcpu3s->context_id[j])
                        continue;

                __destroy_context(to_book3s(vcpu)->context_id[j]);
        }

        return -1;
}