/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "trace.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

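/*
 * vcpu load/put hooks.  On 64-bit hosts the shadow vcpu carries a copy
 * of the guest SLB for the real-mode entry code, so we sync it in on
 * load and back out on put; on 32-bit hosts the shadow vcpu is instead
 * reached through current->thread, so only the pointer is set up here.
 */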
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
        svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
        svcpu_put(svcpu);
#endif
        vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
        current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
        to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
        svcpu_put(svcpu);
#endif

        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
        vcpu->cpu = -1;
}

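/*
 * The shadow vcpu is the small per-CPU structure that the low-level
 * (real mode) guest entry/exit code operates on, since it cannot
 * conveniently reach the full vcpu there.  The two helpers below
 * shuttle the volatile register state between the two representations
 * around every guest run.
 */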
/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
                          struct kvm_vcpu *vcpu)
{
        svcpu->gpr[0] = vcpu->arch.gpr[0];
        svcpu->gpr[1] = vcpu->arch.gpr[1];
        svcpu->gpr[2] = vcpu->arch.gpr[2];
        svcpu->gpr[3] = vcpu->arch.gpr[3];
        svcpu->gpr[4] = vcpu->arch.gpr[4];
        svcpu->gpr[5] = vcpu->arch.gpr[5];
        svcpu->gpr[6] = vcpu->arch.gpr[6];
        svcpu->gpr[7] = vcpu->arch.gpr[7];
        svcpu->gpr[8] = vcpu->arch.gpr[8];
        svcpu->gpr[9] = vcpu->arch.gpr[9];
        svcpu->gpr[10] = vcpu->arch.gpr[10];
        svcpu->gpr[11] = vcpu->arch.gpr[11];
        svcpu->gpr[12] = vcpu->arch.gpr[12];
        svcpu->gpr[13] = vcpu->arch.gpr[13];
        svcpu->cr  = vcpu->arch.cr;
        svcpu->xer = vcpu->arch.xer;
        svcpu->ctr = vcpu->arch.ctr;
        svcpu->lr  = vcpu->arch.lr;
        svcpu->pc  = vcpu->arch.pc;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
                            struct kvmppc_book3s_shadow_vcpu *svcpu)
{
        vcpu->arch.gpr[0] = svcpu->gpr[0];
        vcpu->arch.gpr[1] = svcpu->gpr[1];
        vcpu->arch.gpr[2] = svcpu->gpr[2];
        vcpu->arch.gpr[3] = svcpu->gpr[3];
        vcpu->arch.gpr[4] = svcpu->gpr[4];
        vcpu->arch.gpr[5] = svcpu->gpr[5];
        vcpu->arch.gpr[6] = svcpu->gpr[6];
        vcpu->arch.gpr[7] = svcpu->gpr[7];
        vcpu->arch.gpr[8] = svcpu->gpr[8];
        vcpu->arch.gpr[9] = svcpu->gpr[9];
        vcpu->arch.gpr[10] = svcpu->gpr[10];
        vcpu->arch.gpr[11] = svcpu->gpr[11];
        vcpu->arch.gpr[12] = svcpu->gpr[12];
        vcpu->arch.gpr[13] = svcpu->gpr[13];
        vcpu->arch.cr  = svcpu->cr;
        vcpu->arch.xer = svcpu->xer;
        vcpu->arch.ctr = svcpu->ctr;
        vcpu->arch.lr  = svcpu->lr;
        vcpu->arch.pc  = svcpu->pc;
        vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
        vcpu->arch.fault_dar   = svcpu->fault_dar;
        vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
        vcpu->arch.last_inst   = svcpu->last_inst;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
        int r = 1; /* Indicate we want to get back into the guest */

        /* We misuse TLB_FLUSH to indicate that we want to clear
           all shadow cache entries */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return r;
}

/************* MMU Notifiers *************/
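/*
 * These hooks are invoked by the host MM when it unmaps or changes a
 * user page backing guest memory; we react by flushing the matching
 * shadow PTEs so the next guest access refaults and gets remapped.
 * Page aging is not tracked for shadow mappings here, so the age/test
 * callbacks below just report "not young".
 */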
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
                             unsigned long end)
{
        long i;
        struct kvm_vcpu *vcpu;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                        (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;
                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn, gfn+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
                kvm_for_each_vcpu(i, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
                                              gfn_end << PAGE_SHIFT);
        }
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        trace_kvm_unmap_hva(hva);

        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);

        return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
        do_kvm_unmap_hva(kvm, start, end);

        return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

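/*
 * Compute the MSR the real CPU runs with while the guest is active
 * (the "shadow" MSR).  Reading straight off the code below: only a
 * safe subset of guest bits (FE0/FE1/SF/SE/BE plus currently owned
 * facilities) is let through, and the bits the host needs are forced
 * on; IR/DR/PR stay set because the guest really runs in problem
 * state with translation enabled.  Worked example: a 64-bit guest
 * with MSR_FP set and the FPU owned ends up with
 * SF|FP|ME|RI|IR|DR|PR|EE (plus ISF|HV on Book3S_64 hosts).
 */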
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
        ulong smsr = vcpu->arch.shared->msr;

        /* Guest MSR values */
        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
        /* Process MSR values */
        smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
        /* External provider bits the guest has reserved */
        smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
        /* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
        smsr |= MSR_ISF | MSR_HV;
#endif
        vcpu->arch.shadow_msr = smsr;
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
        ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
        printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

        msr &= to_book3s(vcpu)->msr_mask;
        vcpu->arch.shared->msr = msr;
        kvmppc_recalc_shadow_msr(vcpu);

        if (msr & MSR_POW) {
                if (!vcpu->arch.pending_exceptions) {
                        kvm_vcpu_block(vcpu);
                        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        vcpu->stat.halt_wakeup++;

                        /* Unset POW bit after we woke up */
                        msr &= ~MSR_POW;
                        vcpu->arch.shared->msr = msr;
                }
        }

        if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
                   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

                /* Preload magic page segment when in kernel mode */
                if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
                        struct kvm_vcpu_arch *a = &vcpu->arch;

                        if (msr & MSR_DR)
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
                        else
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
                }
        }

        /*
         * When switching from 32 to 64-bit, we may have a stale 32-bit
         * magic page around, we need to flush it. Typically the 32-bit
         * magic page will be instantiated when calling into RTAS. Note:
         * We assume that such a transition only happens while in kernel
         * mode, i.e., we never transition from user 32-bit to kernel
         * 64-bit with a 32-bit magic page around.
         */
        if (vcpu->arch.magic_page_pa &&
            !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
                /* going from RTAS to normal kernel code */
                kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
                                     ~0xFFFUL);
        }

        /* Preload FPU if it's enabled */
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
        u32 host_pvr;

        vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
        vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
        if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
                kvmppc_mmu_book3s_64_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0xfff00000;
                to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_64;
        } else
#endif
        {
                kvmppc_mmu_book3s_32_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0;
                to_book3s(vcpu)->msr_mask = 0xffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_32;
        }

        kvmppc_sanity_check(vcpu);

        /* If we are in hypervisor level on 970, we can tell the CPU to
         * treat DCBZ as a 32-byte store */
        vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
        if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
            !strcmp(cur_cpu_spec->platform, "ppc970"))
                vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

        /* Cell performs badly if MSR_FEx are set. So let's hope nobody
           really needs them in a VM on Cell and force disable them. */
        if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
                to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

        /*
         * If they're asking for POWER6 or later, set the flag
         * indicating that we can do multiple large page sizes
         * and 1TB segments.
         * Also set the flag that indicates that tlbie has the large
         * page bit in the RB operand instead of the instruction.
         */
        switch (PVR_VER(pvr)) {
        case PVR_POWER6:
        case PVR_POWER7:
        case PVR_POWER7p:
        case PVR_POWER8:
                vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
                        BOOK3S_HFLAG_NEW_TLBIE;
                break;
        }

#ifdef CONFIG_PPC_BOOK3S_32
        /* 32-bit Book3S always has a 32-byte dcbz */
        vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

        /* On some CPUs we can execute paired single operations natively */
        asm ( "mfpvr %0" : "=r"(host_pvr));
        switch (host_pvr) {
        case 0x00080200:        /* lonestar 2.0 */
        case 0x00088202:        /* lonestar 2.2 */
        case 0x70000100:        /* gekko 1.0 */
        case 0x00080100:        /* gekko 2.0 */
        case 0x00083203:        /* gekko 2.3a */
        case 0x00083213:        /* gekko 2.3b */
        case 0x00083204:        /* gekko 2.4 */
        case 0x00083214:        /* gekko 2.4e (8SE) - retail HW2 */
        case 0x00087200:        /* broadway */
                vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
                /* Enable HID2.PSE - in case we need it later */
                mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
        }
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz
 * to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't use
 * it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
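/*
 * Concretely (matching the masks used below and in the program
 * interrupt handler): dcbz is recognized by
 * (insn & 0xff0007ff) == INS_DCBZ, and clearing the 0x8 bit turns it
 * into a reserved instruction form.  Executing the patched word then
 * raises a program interrupt, which kvmppc_handle_exit() detects with
 * the same masked comparison and hands to the emulator, where dcbz is
 * emulated with a 32-byte length.
 */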
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        struct page *hpage;
        u64 hpage_offset;
        u32 *page;
        int i;

        hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
        if (is_error_page(hpage))
                return;

        hpage_offset = pte->raddr & ~PAGE_MASK;
        hpage_offset &= ~0xFFFULL;
        hpage_offset /= 4;

        get_page(hpage);
        page = kmap_atomic(hpage);

        /* patch dcbz into reserved instruction, so we trap */
        for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
                if ((page[i] & 0xff0007ff) == INS_DCBZ)
                        page[i] &= 0xfffffff7;

        kunmap_atomic(page);
        put_page(hpage);
}

static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        ulong mp_pa = vcpu->arch.magic_page_pa;

        if (!(vcpu->arch.shared->msr & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        if (unlikely(mp_pa) &&
            unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
                return 1;
        }

        return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

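/*
 * Resolve a guest page fault.  Rough flow: translate the effective
 * address through the guest MMU (or fabricate a 1:1 real-mode mapping
 * when translation is off), reflect genuine guest faults (-ENOENT,
 * -EPERM, -EINVAL) back into the guest, map the page on the host if it
 * belongs to a visible memslot, and otherwise treat the access as MMIO
 * to be emulated.
 */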
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            ulong eaddr, int vec)
{
        bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
        bool iswrite = false;
        int r = RESUME_GUEST;
        int relocated;
        int page_found = 0;
        struct kvmppc_pte pte;
        bool is_mmio = false;
        bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
        bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
        u64 vsid;

        relocated = data ? dr : ir;
        if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
                iswrite = true;

        /* Resolve real address if translation turned on */
        if (relocated) {
                page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
        } else {
                pte.may_execute = true;
                pte.may_read = true;
                pte.may_write = true;
                pte.raddr = eaddr & KVM_PAM;
                pte.eaddr = eaddr;
                pte.vpage = eaddr >> 12;
                pte.page_size = MMU_PAGE_64K;
        }

        switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
        case 0:
                pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
        case MSR_DR:
        case MSR_IR:
                vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

                if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
                        pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                else
                        pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
                pte.vpage |= vsid;

                if (vsid == -1)
                        page_found = -EINVAL;
                break;
        }

        if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
           (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                /*
                 * If we do the dcbz hack, we have to NX on every execution,
                 * so we can patch the executing code. This renders our guest
                 * NX-less.
                 */
                pte.may_execute = !data;
        }

        if (page_found == -ENOENT) {
                /* Page not found in guest PTE entries */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
                vcpu->arch.shared->msr |=
                        vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EPERM) {
                /* Storage protection */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
                vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
                vcpu->arch.shared->msr |=
                        vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EINVAL) {
                /* Page not found in guest SLB */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
        } else if (!is_mmio &&
                   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
                if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
                        /*
                         * There is already a host HPTE there, presumably
                         * a read-only one for a page the guest thinks
                         * is writable, so get rid of it first.
                         */
                        kvmppc_mmu_unmap_page(vcpu, &pte);
                }
                /* The guest's PTE is not mapped yet. Map on the host */
                kvmppc_mmu_map_page(vcpu, &pte, iswrite);
                if (data)
                        vcpu->stat.sp_storage++;
                else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                         (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
                        kvmppc_patch_dcbz(vcpu, &pte);
        } else {
                /* MMIO */
                vcpu->stat.mmio_exits++;
                vcpu->arch.paddr_accessed = pte.raddr;
                vcpu->arch.vaddr_accessed = pte.eaddr;
                r = kvmppc_emulate_mmio(run, vcpu);
                if ( r == RESUME_HOST_NV )
                        r = RESUME_HOST;
        }

        return r;
}

static inline int get_fpr_index(int i)
{
        return i * TS_FPRWIDTH;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
        struct thread_struct *t = &current->thread;
        u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
        u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
        u64 *thread_fpr = (u64 *)t->fpr;
        int i;

        /*
         * VSX instructions can access FP and vector registers, so if
         * we are giving up VSX, make sure we give up FP and VMX as well.
         */
        if (msr & MSR_VSX)
                msr |= MSR_FP | MSR_VEC;

        msr &= vcpu->arch.guest_owned_ext;
        if (!msr)
                return;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                /*
                 * Note that on CPUs with VSX, giveup_fpu stores
                 * both the traditional FP registers and the added VSX
                 * registers into thread.fpr[].
                 */
                if (current->thread.regs->msr & MSR_FP)
                        giveup_fpu(current);
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
                        vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

                vcpu->arch.fpscr = t->fpscr.val;

#ifdef CONFIG_VSX
                if (cpu_has_feature(CPU_FTR_VSX))
                        for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
                                vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
        }

#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
                vcpu->arch.vscr = t->vscr;
        }
#endif

        vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
        kvmppc_recalc_shadow_msr(vcpu);
}

static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
        ulong srr0 = kvmppc_get_pc(vcpu);
        u32 last_inst = kvmppc_get_last_inst(vcpu);
        int ret;

        ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
        if (ret == -ENOENT) {
                ulong msr = vcpu->arch.shared->msr;

                msr = kvmppc_set_field(msr, 33, 33, 1);
                msr = kvmppc_set_field(msr, 34, 36, 0);
                vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
                kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
                return EMULATE_AGAIN;
        }

        return EMULATE_DONE;
}

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
        /* Need to do paired single emulation? */
        if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
                return EMULATE_DONE;

        /* Read out the instruction */
        if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
                /* Need to emulate */
                return EMULATE_FAIL;

        return EMULATE_AGAIN;
}

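/*
 * Extension state (FPU/Altivec/VSX) is switched lazily: the facility
 * bits stay clear in the shadow MSR until the guest actually uses the
 * unit, at which point the "unavailable" interrupt lands here and the
 * guest's register state is loaded into the real unit.
 * vcpu->arch.guest_owned_ext tracks which units currently hold guest
 * state; kvmppc_giveup_ext() above is the inverse operation.
 */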
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr)
{
        struct thread_struct *t = &current->thread;
        u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
        u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
        u64 *thread_fpr = (u64 *)t->fpr;
        int i;

        /* When we have paired singles, we emulate in software */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                return RESUME_GUEST;

        if (!(vcpu->arch.shared->msr & msr)) {
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                return RESUME_GUEST;
        }

        if (msr == MSR_VSX) {
                /* No VSX?  Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
                if (!cpu_has_feature(CPU_FTR_VSX))
#endif
                {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }

                /*
                 * We have to load up all the FP and VMX registers before
                 * we can let the guest use VSX instructions.
                 */
                msr = MSR_FP | MSR_VEC | MSR_VSX;
        }

        /* See if we already own all the ext(s) needed */
        msr &= ~vcpu->arch.guest_owned_ext;
        if (!msr)
                return RESUME_GUEST;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
                        thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
#ifdef CONFIG_VSX
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
                        thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
#endif
                t->fpscr.val = vcpu->arch.fpscr;
                t->fpexc_mode = 0;
                kvmppc_load_up_fpu();
        }

        if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
                memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
                t->vscr = vcpu->arch.vscr;
                t->vrsave = -1;
                kvmppc_load_up_altivec();
#endif
        }

        current->thread.regs->msr |= msr;
        vcpu->arch.guest_owned_ext |= msr;
        kvmppc_recalc_shadow_msr(vcpu);

        return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
        unsigned long lost_ext;

        lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
        if (!lost_ext)
                return;

        if (lost_ext & MSR_FP)
                kvmppc_load_up_fpu();
#ifdef CONFIG_ALTIVEC
        if (lost_ext & MSR_VEC)
                kvmppc_load_up_altivec();
#endif
        current->thread.regs->msr |= lost_ext;
}

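/*
 * Main exit dispatcher, called for every interrupt that kicked us out
 * of the guest.  The return value is a RESUME_* code: RESUME_GUEST
 * re-enters the guest directly, while any value with the RESUME_HOST
 * flag set unwinds to the userspace ioctl with run->exit_reason
 * filled in.
 */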
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
        int r = RESUME_HOST;
        int s;

        vcpu->stat.sum_exits++;

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        /* We get here with MSR.EE=1 */

        trace_kvm_exit(exit_nr, vcpu);
        kvm_guest_exit();

        switch (exit_nr) {
        case BOOK3S_INTERRUPT_INST_STORAGE:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them. So
                 * treat the respective fault as segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /* only care about PTEG not found errors, but leave NX alone */
                if (shadow_srr1 & 0x40000000) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                        vcpu->stat.sp_instruc++;
                } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                          (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                        /*
                         * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
                         *     so we can't use the NX bit inside the guest. Let's cross our fingers,
                         *     that no guest that needs the dcbz hack does NX.
                         */
                        kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
                } else {
                        vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_STORAGE:
        {
                ulong dar = kvmppc_get_fault_dar(vcpu);
                u32 fault_dsisr = vcpu->arch.fault_dsisr;
                vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them. So
                 * treat the respective fault as segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[dar >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, dar);
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /*
                 * We need to handle missing shadow PTEs, and
                 * protection faults due to us mapping a page read-only
                 * when the guest thinks it is writable.
                 */
                if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                } else {
                        vcpu->arch.shared->dar = dar;
                        vcpu->arch.shared->dsisr = fault_dsisr;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
                        vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_DATA_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_INST_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_INST_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        /* We're good on these - the host merely wanted to get our attention */
        case BOOK3S_INTERRUPT_DECREMENTER:
        case BOOK3S_INTERRUPT_HV_DECREMENTER:
                vcpu->stat.dec_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_EXTERNAL:
        case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
        case BOOK3S_INTERRUPT_EXTERNAL_HV:
                vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PERFMON:
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PROGRAM:
        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
        {
                enum emulation_result er;
                ulong flags;

program_interrupt:
                flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;

                if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
                        printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
                        if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
                            (INS_DCBZ & 0xfffffff7)) {
                                kvmppc_core_queue_program(vcpu, flags);
                                r = RESUME_GUEST;
                                break;
                        }
                }

                vcpu->stat.emulated_inst_exits++;
                er = kvmppc_emulate_instruction(run, vcpu);
                switch (er) {
                case EMULATE_DONE:
                        r = RESUME_GUEST_NV;
                        break;
                case EMULATE_AGAIN:
                        r = RESUME_GUEST;
                        break;
                case EMULATE_FAIL:
                        printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                               __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
                        kvmppc_core_queue_program(vcpu, flags);
                        r = RESUME_GUEST;
                        break;
                case EMULATE_DO_MMIO:
                        run->exit_reason = KVM_EXIT_MMIO;
                        r = RESUME_HOST_NV;
                        break;
                case EMULATE_EXIT_USER:
                        r = RESUME_HOST_NV;
                        break;
                default:
                        BUG();
                }
                break;
        }
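        /*
         * Syscall exits are demultiplexed below: "sc 1" from guest kernel
         * mode (0x44000022 should be the sc instruction with LEV=1) is a
         * PAPR hypercall, magic r3/r4 values select MOL/OSI hypercalls,
         * a magic r0 from kernel mode selects KVM's own PV interface,
         * and anything else is an ordinary guest syscall to reflect.
         */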
        case BOOK3S_INTERRUPT_SYSCALL:
                if (vcpu->arch.papr_enabled &&
                    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
                    !(vcpu->arch.shared->msr & MSR_PR)) {
                        /* SC 1 papr hypercalls */
                        ulong cmd = kvmppc_get_gpr(vcpu, 3);
                        int i;

#ifdef CONFIG_KVM_BOOK3S_64_PR
                        if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
                                r = RESUME_GUEST;
                                break;
                        }
#endif

                        run->papr_hcall.nr = cmd;
                        for (i = 0; i < 9; ++i) {
                                ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
                                run->papr_hcall.args[i] = gpr;
                        }
                        run->exit_reason = KVM_EXIT_PAPR_HCALL;
                        vcpu->arch.hcall_needed = 1;
                        r = RESUME_HOST;
                } else if (vcpu->arch.osi_enabled &&
                    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
                    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
                        /* MOL hypercalls */
                        u64 *gprs = run->osi.gprs;
                        int i;

                        run->exit_reason = KVM_EXIT_OSI;
                        for (i = 0; i < 32; i++)
                                gprs[i] = kvmppc_get_gpr(vcpu, i);
                        vcpu->arch.osi_needed = 1;
                        r = RESUME_HOST_NV;
                } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        vcpu->stat.syscall_exits++;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        case BOOK3S_INTERRUPT_FP_UNAVAIL:
        case BOOK3S_INTERRUPT_ALTIVEC:
        case BOOK3S_INTERRUPT_VSX:
        {
                int ext_msr = 0;

                switch (exit_nr) {
                case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
                case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
                case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
                }

                switch (kvmppc_check_ext(vcpu, exit_nr)) {
                case EMULATE_DONE:
                        /* everything ok - let's enable the ext */
                        r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
                        break;
                case EMULATE_FAIL:
                        /* we need to emulate this instruction */
                        goto program_interrupt;
                        break;
                default:
                        /* nothing to worry about - go again */
                        break;
                }
                break;
        }
        case BOOK3S_INTERRUPT_ALIGNMENT:
                if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
                        vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
                                kvmppc_get_last_inst(vcpu));
                        vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
                                kvmppc_get_last_inst(vcpu));
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_MACHINE_CHECK:
        case BOOK3S_INTERRUPT_TRACE:
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                r = RESUME_GUEST;
                break;
        default:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                /* Ugh - bork here! What did we get? */
                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
                        exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
                r = RESUME_HOST;
                BUG();
                break;
        }
        }

        if (!(r & RESUME_HOST)) {
                /* To avoid clobbering exit_reason, only check for signals if
                 * we aren't already exiting to userspace for some other
                 * reason. */

                /*
                 * Interrupts could be timers for the guest which we have to
                 * inject again, so let's postpone them until we're in the guest
                 * and if we really did time things so badly, then we just exit
                 * again due to a host external interrupt.
                 */
                local_irq_disable();
                s = kvmppc_prepare_to_enter(vcpu);
                if (s <= 0) {
                        local_irq_enable();
                        r = s;
                } else {
                        kvmppc_fix_ee_before_entry();
                }
                kvmppc_handle_lost_ext(vcpu);
        }

        trace_kvm_book3s_reenter(r, vcpu);

        return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        sregs->pvr = vcpu->arch.pvr;

        sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
                        sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
                }
        } else {
                for (i = 0; i < 16; i++)
                        sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

                for (i = 0; i < 8; i++) {
                        sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
                        sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
                }
        }

        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        kvmppc_set_pvr(vcpu, sregs->pvr);

        vcpu3s->sdr1 = sregs->u.s.sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
                                                    sregs->u.s.ppc64.slb[i].slbe);
                }
        } else {
                for (i = 0; i < 16; i++) {
                        vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
                }
                for (i = 0; i < 8; i++) {
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
                                       (u32)sregs->u.s.ppc32.ibat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
                                       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
                                       (u32)sregs->u.s.ppc32.dbat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
                                       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
                }
        }

        /* Flush the MMU after messing with the segments */
        kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return 0;
}

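/*
 * ONE_REG accessors for the registers that need PR-specific handling:
 * HIOR (setting it records hior_explicit so kvmppc_set_pvr() won't
 * overwrite the value) and, with VSX, the VSR pairs, whose 128 bits
 * are kept split across arch.fpr[] and arch.vsr[].
 */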
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                *val = get_reg_val(id, to_book3s(vcpu)->hior);
                break;
#ifdef CONFIG_VSX
        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
                long int i = id - KVM_REG_PPC_VSR0;

                if (!cpu_has_feature(CPU_FTR_VSX)) {
                        r = -ENXIO;
                        break;
                }
                val->vsxval[0] = vcpu->arch.fpr[i];
                val->vsxval[1] = vcpu->arch.vsr[i];
                break;
        }
#endif /* CONFIG_VSX */
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                to_book3s(vcpu)->hior = set_reg_val(id, *val);
                to_book3s(vcpu)->hior_explicit = true;
                break;
#ifdef CONFIG_VSX
        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
                long int i = id - KVM_REG_PPC_VSR0;

                if (!cpu_has_feature(CPU_FTR_VSX)) {
                        r = -ENXIO;
                        break;
                }
                vcpu->arch.fpr[i] = val->vsxval[0];
                vcpu->arch.vsr[i] = val->vsxval[1];
                break;
        }
#endif /* CONFIG_VSX */
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

int kvmppc_core_check_processor_compat(void)
{
        return 0;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s;
        struct kvm_vcpu *vcpu;
        int err = -ENOMEM;
        unsigned long p;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
        if (!vcpu_book3s)
                goto free_vcpu;
        vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32
        vcpu->arch.shadow_vcpu =
                kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
        if (!vcpu->arch.shadow_vcpu)
                goto free_vcpu3s;
#endif

        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_shadow_vcpu;

        err = -ENOMEM;
        p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
        if (!p)
                goto uninit_vcpu;
        /* the real shared page fills the last 4k of our page */
        vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);

#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * Default to the same as the host if we're on sufficiently
         * recent machine that we have 1TB segments;
         * otherwise default to PPC970FX.
         */
        vcpu->arch.pvr = 0x3C0301;
        if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                vcpu->arch.pvr = mfspr(SPRN_PVR);
#else
        /* default to book3s_32 (750) */
        vcpu->arch.pvr = 0x84202;
#endif
        kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
        vcpu->arch.slb_nr = 64;

        vcpu->arch.shadow_msr = MSR_USER64;

        err = kvmppc_mmu_init(vcpu);
        if (err < 0)
                goto uninit_vcpu;

        return vcpu;

uninit_vcpu:
        kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32
        kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
        vfree(vcpu_book3s);
free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
        kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32
        kfree(vcpu->arch.shadow_vcpu);
#endif
        vfree(vcpu_book3s);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

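/*
 * Outer run-loop entry.  The host's FPU/Altivec/VSX state is parked on
 * the stack here and restored once __kvmppc_vcpu_run() returns, since
 * the guest may load its own state into those units in between.
 */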
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        int ret;
        double fpr[32][TS_FPRWIDTH];
        unsigned int fpscr;
        int fpexc_mode;
#ifdef CONFIG_ALTIVEC
        vector128 vr[32];
        vector128 vscr;
        unsigned long uninitialized_var(vrsave);
        int used_vr;
#endif
#ifdef CONFIG_VSX
        int used_vsr;
#endif
        ulong ext_msr;

        /* Check if we can run the vcpu at all */
        if (!vcpu->arch.sane) {
                kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = -EINVAL;
                goto out;
        }

        /*
         * Interrupts could be timers for the guest which we have to inject
         * again, so let's postpone them until we're in the guest and if we
         * really did time things so badly, then we just exit again due to
         * a host external interrupt.
         */
        local_irq_disable();
        ret = kvmppc_prepare_to_enter(vcpu);
        if (ret <= 0) {
                local_irq_enable();
                goto out;
        }

        /* Save FPU state in stack */
        if (current->thread.regs->msr & MSR_FP)
                giveup_fpu(current);
        memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
        fpscr = current->thread.fpscr.val;
        fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
        /* Save Altivec state in stack */
        used_vr = current->thread.used_vr;
        if (used_vr) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
                vscr = current->thread.vscr;
                vrsave = current->thread.vrsave;
        }
#endif

#ifdef CONFIG_VSX
        /* Save VSX state in stack */
        used_vsr = current->thread.used_vsr;
        if (used_vsr && (current->thread.regs->msr & MSR_VSX))
                __giveup_vsx(current);
#endif

        /* Remember the MSR with disabled extensions */
        ext_msr = current->thread.regs->msr;

        /* Preload FPU if it's enabled */
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

        kvmppc_fix_ee_before_entry();

        ret = __kvmppc_vcpu_run(kvm_run, vcpu);

        /* No need for kvm_guest_exit. It's done in handle_exit.
           We also get here with interrupts enabled. */

        /* Make sure we save the guest FPU/Altivec/VSX state */
        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

        current->thread.regs->msr = ext_msr;

        /* Restore FPU/VSX state from stack */
        memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
        current->thread.fpscr.val = fpscr;
        current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
        /* Restore Altivec state from stack */
        if (used_vr && current->thread.used_vr) {
                memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
                current->thread.vscr = vscr;
                current->thread.vrsave = vrsave;
        }
        current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
        current->thread.used_vsr = used_vsr;
#endif

out:
        vcpu->mode = OUTSIDE_GUEST_MODE;
        return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        struct kvm_vcpu *vcpu;
        ulong ga, ga_end;
        int is_dirty = 0;
        int r;
        unsigned long n;

        mutex_lock(&kvm->slots_lock);

        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
                memslot = id_to_memslot(kvm->memslots, log->slot);

                ga = memslot->base_gfn << PAGE_SHIFT;
                ga_end = ga + (memslot->npages << PAGE_SHIFT);

                kvm_for_each_vcpu(n, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }

        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

#ifdef CONFIG_PPC64
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
        long int i;
        struct kvm_vcpu *vcpu;

        info->flags = 0;

        /* SLB is always 64 entries */
        info->slb_size = 64;

        /* Standard 4k base page size segment */
        info->sps[0].page_shift = 12;
        info->sps[0].slb_enc = 0;
        info->sps[0].enc[0].page_shift = 12;
        info->sps[0].enc[0].pte_enc = 0;

        /*
         * 64k large page size.
         * We only want to put this in if the CPUs we're emulating
         * support it, but unfortunately we don't have a vcpu easily
         * to hand here to test.  Just pick the first vcpu, and if
         * that doesn't exist yet, report the minimum capability,
         * i.e., no 64k pages.
         * 1T segment support goes along with 64k pages.
         */
        i = 1;
        vcpu = kvm_get_vcpu(kvm, 0);
        if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
                info->flags = KVM_PPC_1T_SEGMENTS;
                info->sps[i].page_shift = 16;
                info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
                info->sps[i].enc[0].page_shift = 16;
                info->sps[i].enc[0].pte_enc = 1;
                ++i;
        }

        /* Standard 16M large page size segment */
        info->sps[i].page_shift = 24;
        info->sps[i].slb_enc = SLB_VSID_L;
        info->sps[i].enc[0].page_shift = 24;
        info->sps[i].enc[0].pte_enc = 0;

        return 0;
}
#endif /* CONFIG_PPC64 */

void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
                              struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
                               unsigned long npages)
{
        return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot,
                                      struct kvm_userspace_memory_region *mem)
{
        return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem,
                                      const struct kvm_memory_slot *old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

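/*
 * PR KVM apparently cannot cope with "relocation on exception" being
 * enabled (it needs to intercept exceptions through the classical
 * vectors), so the refcount below turns the firmware feature off while
 * the first VM exists and back on after the last one is destroyed.
 */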
static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
        INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
        INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif
        mutex_init(&kvm->arch.hpt_mutex);

        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                spin_lock(&kvm_global_user_count_lock);
                if (++kvm_global_user_count == 1)
                        pSeries_disable_reloc_on_exc();
                spin_unlock(&kvm_global_user_count_lock);
        }
        return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
        WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                spin_lock(&kvm_global_user_count_lock);
                BUG_ON(kvm_global_user_count == 0);
                if (--kvm_global_user_count == 0)
                        pSeries_enable_reloc_on_exc();
                spin_unlock(&kvm_global_user_count_lock);
        }
}

static int kvmppc_book3s_init(void)
{
        int r;

        r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (r)
                return r;

        r = kvmppc_mmu_hpte_sysinit();

        return r;
}

static void kvmppc_book3s_exit(void)
{
        kvmppc_mmu_hpte_sysexit();
        kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);