/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
        svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
        svcpu_put(svcpu);
#endif
        vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
        current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
        to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
        svcpu_put(svcpu);
#endif

        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
        vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
                          struct kvm_vcpu *vcpu)
{
        svcpu->gpr[0] = vcpu->arch.gpr[0];
        svcpu->gpr[1] = vcpu->arch.gpr[1];
        svcpu->gpr[2] = vcpu->arch.gpr[2];
        svcpu->gpr[3] = vcpu->arch.gpr[3];
        svcpu->gpr[4] = vcpu->arch.gpr[4];
        svcpu->gpr[5] = vcpu->arch.gpr[5];
        svcpu->gpr[6] = vcpu->arch.gpr[6];
        svcpu->gpr[7] = vcpu->arch.gpr[7];
        svcpu->gpr[8] = vcpu->arch.gpr[8];
        svcpu->gpr[9] = vcpu->arch.gpr[9];
        svcpu->gpr[10] = vcpu->arch.gpr[10];
        svcpu->gpr[11] = vcpu->arch.gpr[11];
        svcpu->gpr[12] = vcpu->arch.gpr[12];
        svcpu->gpr[13] = vcpu->arch.gpr[13];
        svcpu->cr  = vcpu->arch.cr;
        svcpu->xer = vcpu->arch.xer;
        svcpu->ctr = vcpu->arch.ctr;
        svcpu->lr  = vcpu->arch.lr;
        svcpu->pc  = vcpu->arch.pc;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
                            struct kvmppc_book3s_shadow_vcpu *svcpu)
{
        vcpu->arch.gpr[0] = svcpu->gpr[0];
        vcpu->arch.gpr[1] = svcpu->gpr[1];
        vcpu->arch.gpr[2] = svcpu->gpr[2];
        vcpu->arch.gpr[3] = svcpu->gpr[3];
        vcpu->arch.gpr[4] = svcpu->gpr[4];
        vcpu->arch.gpr[5] = svcpu->gpr[5];
        vcpu->arch.gpr[6] = svcpu->gpr[6];
        vcpu->arch.gpr[7] = svcpu->gpr[7];
        vcpu->arch.gpr[8] = svcpu->gpr[8];
        vcpu->arch.gpr[9] = svcpu->gpr[9];
        vcpu->arch.gpr[10] = svcpu->gpr[10];
        vcpu->arch.gpr[11] = svcpu->gpr[11];
        vcpu->arch.gpr[12] = svcpu->gpr[12];
        vcpu->arch.gpr[13] = svcpu->gpr[13];
        vcpu->arch.cr  = svcpu->cr;
        vcpu->arch.xer = svcpu->xer;
        vcpu->arch.ctr = svcpu->ctr;
        vcpu->arch.lr  = svcpu->lr;
        vcpu->arch.pc  = svcpu->pc;
        vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
        vcpu->arch.fault_dar   = svcpu->fault_dar;
        vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
        vcpu->arch.last_inst   = svcpu->last_inst;
}

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
        int r = 1; /* Indicate we want to get back into the guest */

        /* We misuse TLB_FLUSH to indicate that we want to clear
           all shadow cache entries */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return r;
}

/************* MMU Notifiers *************/
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
                             unsigned long end)
{
        long i;
        struct kvm_vcpu *vcpu;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                        (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;
                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn, gfn+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
                kvm_for_each_vcpu(i, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
                                              gfn_end << PAGE_SHIFT);
        }
}

static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
{
        trace_kvm_unmap_hva(hva);

        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);

        return 0;
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
                                  unsigned long end)
{
        do_kvm_unmap_hva(kvm, start, end);

        return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

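/*
 * Recompute the MSR value the CPU actually uses while the guest runs.
 * Only a few guest-visible bits pass straight through; MSR_PR, MSR_IR,
 * MSR_DR and MSR_EE are forced on so the guest always executes in problem
 * state with host translation and interrupts enabled, whatever its own
 * MSR claims.
 */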
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
        ulong smsr = vcpu->arch.shared->msr;

        /* Guest MSR values */
        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
        /* Process MSR values */
        smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
        /* External providers the guest reserved */
        smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
        /* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
        smsr |= MSR_ISF | MSR_HV;
#endif
        vcpu->arch.shadow_msr = smsr;
}

static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
        ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
        printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

        msr &= to_book3s(vcpu)->msr_mask;
        vcpu->arch.shared->msr = msr;
        kvmppc_recalc_shadow_msr(vcpu);

        if (msr & MSR_POW) {
                if (!vcpu->arch.pending_exceptions) {
                        kvm_vcpu_block(vcpu);
                        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        vcpu->stat.halt_wakeup++;

                        /* Unset POW bit after we woke up */
                        msr &= ~MSR_POW;
                        vcpu->arch.shared->msr = msr;
                }
        }

        if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
                   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

                /* Preload magic page segment when in kernel mode */
                if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
                        struct kvm_vcpu_arch *a = &vcpu->arch;

                        if (msr & MSR_DR)
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
                        else
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
                }
        }

        /*
         * When switching from 32 to 64-bit, we may have a stale 32-bit
         * magic page around which we need to flush. Typically the 32-bit
         * magic page will be instantiated when calling into RTAS. Note: We
         * assume that such a transition only happens while in kernel mode,
         * i.e., we never transition from user 32-bit to kernel 64-bit with
         * a 32-bit magic page around.
         */
        if (vcpu->arch.magic_page_pa &&
            !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
                /* going from RTAS to normal kernel code */
                kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
                                     ~0xFFFUL);
        }

        /* Preload FPU if it's enabled */
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
        u32 host_pvr;

        vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
        vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
        if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
                kvmppc_mmu_book3s_64_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0xfff00000;
                to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_64;
        } else
#endif
        {
                kvmppc_mmu_book3s_32_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0;
                to_book3s(vcpu)->msr_mask = 0xffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_32;
        }

        kvmppc_sanity_check(vcpu);

        /* If we are in hypervisor level on 970, we can tell the CPU to
         * treat DCBZ as a 32-byte store */
        vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
        if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
            !strcmp(cur_cpu_spec->platform, "ppc970"))
                vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

        /* Cell performs badly if MSR_FEx are set. So let's hope nobody
           really needs them in a VM on Cell and force-disable them. */
        if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
                to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

        /*
         * If they're asking for POWER6 or later, set the flag
         * indicating that we can do multiple large page sizes
         * and 1TB segments.
         * Also set the flag that indicates that tlbie has the large
         * page bit in the RB operand instead of the instruction.
         */
        switch (PVR_VER(pvr)) {
        case PVR_POWER6:
        case PVR_POWER7:
        case PVR_POWER7p:
        case PVR_POWER8:
                vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
                        BOOK3S_HFLAG_NEW_TLBIE;
                break;
        }

#ifdef CONFIG_PPC_BOOK3S_32
        /* 32 bit Book3S always has 32 byte dcbz */
        vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

        /* On some CPUs we can execute paired single operations natively */
        asm ( "mfpvr %0" : "=r"(host_pvr));
        switch (host_pvr) {
        case 0x00080200:        /* lonestar 2.0 */
        case 0x00088202:        /* lonestar 2.2 */
        case 0x70000100:        /* gekko 1.0 */
        case 0x00080100:        /* gekko 2.0 */
        case 0x00083203:        /* gekko 2.3a */
        case 0x00083213:        /* gekko 2.3b */
        case 0x00083204:        /* gekko 2.4 */
        case 0x00083214:        /* gekko 2.4e (8SE) - retail HW2 */
        case 0x00087200:        /* broadway */
                vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
                /* Enable HID2.PSE - in case we need it later */
                mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
        }
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz
 * to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't use
 * it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        struct page *hpage;
        u64 hpage_offset;
        u32 *page;
        int i;

        hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
        if (is_error_page(hpage))
                return;

        hpage_offset = pte->raddr & ~PAGE_MASK;
        hpage_offset &= ~0xFFFULL;
        hpage_offset /= 4;

        get_page(hpage);
        page = kmap_atomic(hpage);

        /* patch dcbz into reserved instruction, so we trap */
        for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
                if ((page[i] & 0xff0007ff) == INS_DCBZ)
                        page[i] &= 0xfffffff7;

        kunmap_atomic(page);
        put_page(hpage);
}

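/*
 * Worked example for kvmppc_patch_dcbz() above (a sketch of the encodings
 * that the match masks here and in the program-interrupt handler rely on):
 * INS_DCBZ is 0x7c0007ec, a dcbz with zeroed RA/RB fields, so candidates
 * are compared under the mask 0xff0007ff, which ignores those fields.
 * Clearing bit 0x8 turns e.g. 0x7c0007ec into 0x7c0007e4, a reserved
 * encoding that raises a program interrupt; the exit handler below
 * recognizes the patched form by comparing against (INS_DCBZ & 0xfffffff7)
 * and hands it to the emulator as a 32-byte dcbz.
 */
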
static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        ulong mp_pa = vcpu->arch.magic_page_pa;

        if (!(vcpu->arch.shared->msr & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        if (unlikely(mp_pa) &&
            unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
                return 1;
        }

        return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

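/*
 * Resolve a guest page fault: translate the effective address through the
 * guest MMU if relocation is on (or fake an identity mapping if it is off),
 * then either reflect the fault back into the guest, map the page on the
 * host, or hand the access to the MMIO emulator.
 */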
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            ulong eaddr, int vec)
{
        bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
        bool iswrite = false;
        int r = RESUME_GUEST;
        int relocated;
        int page_found = 0;
        struct kvmppc_pte pte;
        bool is_mmio = false;
        bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
        bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
        u64 vsid;

        relocated = data ? dr : ir;
        if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
                iswrite = true;

        /* Resolve real address if translation turned on */
        if (relocated) {
                page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
        } else {
                pte.may_execute = true;
                pte.may_read = true;
                pte.may_write = true;
                pte.raddr = eaddr & KVM_PAM;
                pte.eaddr = eaddr;
                pte.vpage = eaddr >> 12;
                pte.page_size = MMU_PAGE_64K;
        }

        switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
        case 0:
                pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
        case MSR_DR:
        case MSR_IR:
                vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

                if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
                        pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                else
                        pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
                pte.vpage |= vsid;

                if (vsid == -1)
                        page_found = -EINVAL;
                break;
        }

        if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
           (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                /*
                 * If we do the dcbz hack, we have to NX on every execution,
                 * so we can patch the executing code. This renders our guest
                 * NX-less.
                 */
                pte.may_execute = !data;
        }

        if (page_found == -ENOENT) {
                /* Page not found in guest PTE entries */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
                vcpu->arch.shared->msr |=
                        vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EPERM) {
                /* Storage protection */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
                vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
                vcpu->arch.shared->msr |=
                        vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EINVAL) {
                /* Page not found in guest SLB */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
        } else if (!is_mmio &&
                   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
                if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
                        /*
                         * There is already a host HPTE there, presumably
                         * a read-only one for a page the guest thinks
                         * is writable, so get rid of it first.
                         */
                        kvmppc_mmu_unmap_page(vcpu, &pte);
                }
                /* The guest's PTE is not mapped yet. Map on the host */
                kvmppc_mmu_map_page(vcpu, &pte, iswrite);
                if (data)
                        vcpu->stat.sp_storage++;
                else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                         (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
                        kvmppc_patch_dcbz(vcpu, &pte);
        } else {
                /* MMIO */
                vcpu->stat.mmio_exits++;
                vcpu->arch.paddr_accessed = pte.raddr;
                vcpu->arch.vaddr_accessed = pte.eaddr;
                r = kvmppc_emulate_mmio(run, vcpu);
                if ( r == RESUME_HOST_NV )
                        r = RESUME_HOST;
        }

        return r;
}

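/*
 * On CPUs with VSX, thread.fpr[] holds TS_FPRWIDTH (2) doublewords per
 * register: the first is the classic FPR, the second its VSX extension
 * (which is why the save/restore loops below access index + 1). This
 * helper maps an FPR number to its slot in that layout; without VSX,
 * TS_FPRWIDTH is 1 and it degenerates to the identity.
 */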
static inline int get_fpr_index(int i)
{
        return i * TS_FPRWIDTH;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
        struct thread_struct *t = &current->thread;
        u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
        u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
        u64 *thread_fpr = (u64 *)t->fpr;
        int i;

        /*
         * VSX instructions can access FP and vector registers, so if
         * we are giving up VSX, make sure we give up FP and VMX as well.
         */
        if (msr & MSR_VSX)
                msr |= MSR_FP | MSR_VEC;

        msr &= vcpu->arch.guest_owned_ext;
        if (!msr)
                return;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                /*
                 * Note that on CPUs with VSX, giveup_fpu stores
                 * both the traditional FP registers and the added VSX
                 * registers into thread.fpr[].
                 */
                if (current->thread.regs->msr & MSR_FP)
                        giveup_fpu(current);
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
                        vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

                vcpu->arch.fpscr = t->fpscr.val;

#ifdef CONFIG_VSX
                if (cpu_has_feature(CPU_FTR_VSX))
                        for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
                                vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
        }

#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
                vcpu->arch.vscr = t->vscr;
        }
#endif

        vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
        kvmppc_recalc_shadow_msr(vcpu);
}

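/*
 * Fetch the last-executed instruction from guest memory. If the fetch
 * fails because the page is not mapped in the guest page tables,
 * synthesize an instruction storage interrupt so the guest maps the page
 * and retries.
 */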
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
        ulong srr0 = kvmppc_get_pc(vcpu);
        u32 last_inst = kvmppc_get_last_inst(vcpu);
        int ret;

        ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
        if (ret == -ENOENT) {
                ulong msr = vcpu->arch.shared->msr;

                msr = kvmppc_set_field(msr, 33, 33, 1);
                msr = kvmppc_set_field(msr, 34, 36, 0);
                vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
                kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
                return EMULATE_AGAIN;
        }

        return EMULATE_DONE;
}

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
        /* Need to do paired single emulation? */
        if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
                return EMULATE_DONE;

        /* Read out the instruction */
        if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
                /* Need to emulate */
                return EMULATE_FAIL;

        return EMULATE_AGAIN;
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr)
{
        struct thread_struct *t = &current->thread;
        u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
        u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
        u64 *thread_fpr = (u64 *)t->fpr;
        int i;

        /* When we have paired singles, we emulate in software */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                return RESUME_GUEST;

        if (!(vcpu->arch.shared->msr & msr)) {
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                return RESUME_GUEST;
        }

        if (msr == MSR_VSX) {
                /* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
                if (!cpu_has_feature(CPU_FTR_VSX))
#endif
                {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }

                /*
                 * We have to load up all the FP and VMX registers before
                 * we can let the guest use VSX instructions.
                 */
                msr = MSR_FP | MSR_VEC | MSR_VSX;
        }

        /* See if we already own all the ext(s) needed */
        msr &= ~vcpu->arch.guest_owned_ext;
        if (!msr)
                return RESUME_GUEST;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
                        thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
#ifdef CONFIG_VSX
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
                        thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
#endif
                t->fpscr.val = vcpu->arch.fpscr;
                t->fpexc_mode = 0;
                kvmppc_load_up_fpu();
        }

        if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
                memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
                t->vscr = vcpu->arch.vscr;
                t->vrsave = -1;
                kvmppc_load_up_altivec();
#endif
        }

        current->thread.regs->msr |= msr;
        vcpu->arch.guest_owned_ext |= msr;
        kvmppc_recalc_shadow_msr(vcpu);

        return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
        unsigned long lost_ext;

        lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
        if (!lost_ext)
                return;

        if (lost_ext & MSR_FP)
                kvmppc_load_up_fpu();
#ifdef CONFIG_ALTIVEC
        if (lost_ext & MSR_VEC)
                kvmppc_load_up_altivec();
#endif
        current->thread.regs->msr |= lost_ext;
}

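/*
 * Top-level exit handler for PR KVM: called with interrupts enabled after
 * every guest exit, it turns low-level interrupt vectors into guest
 * re-entry (RESUME_GUEST), in-kernel emulation, or an exit to userspace
 * (RESUME_HOST, e.g. for MMIO and PAPR hypercalls).
 */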
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          unsigned int exit_nr)
{
        int r = RESUME_HOST;
        int s;

        vcpu->stat.sum_exits++;

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        /* We get here with MSR.EE=1 */

        trace_kvm_exit(exit_nr, vcpu);
        kvm_guest_exit();

        switch (exit_nr) {
        case BOOK3S_INTERRUPT_INST_STORAGE:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them.
                 * So treat the respective fault as a segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /* only care about PTEG not found errors, but leave NX alone */
                if (shadow_srr1 & 0x40000000) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                        vcpu->stat.sp_instruc++;
                } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                          (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                        /*
                         * XXX If we do the dcbz hack we use the NX bit to
                         * flush&patch the page, so we can't use the NX bit
                         * inside the guest. Let's cross our fingers that no
                         * guest that needs the dcbz hack does NX.
                         */
                        kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
                } else {
                        vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_STORAGE:
        {
                ulong dar = kvmppc_get_fault_dar(vcpu);
                u32 fault_dsisr = vcpu->arch.fault_dsisr;
                vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them.
                 * So treat the respective fault as a segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[dar >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, dar);
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /*
                 * We need to handle missing shadow PTEs, and
                 * protection faults due to us mapping a page read-only
                 * when the guest thinks it is writable.
                 */
                if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                } else {
                        vcpu->arch.shared->dar = dar;
                        vcpu->arch.shared->dsisr = fault_dsisr;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
                        vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_DATA_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_INST_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_INST_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        /* We're good on these - the host merely wanted to get our attention */
        case BOOK3S_INTERRUPT_DECREMENTER:
        case BOOK3S_INTERRUPT_HV_DECREMENTER:
                vcpu->stat.dec_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_EXTERNAL:
        case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
        case BOOK3S_INTERRUPT_EXTERNAL_HV:
                vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PERFMON:
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PROGRAM:
        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
        {
                enum emulation_result er;
                ulong flags;

program_interrupt:
                flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;

                if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
                        printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
                        if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
                            (INS_DCBZ & 0xfffffff7)) {
                                kvmppc_core_queue_program(vcpu, flags);
                                r = RESUME_GUEST;
                                break;
                        }
                }

                vcpu->stat.emulated_inst_exits++;
                er = kvmppc_emulate_instruction(run, vcpu);
                switch (er) {
                case EMULATE_DONE:
                        r = RESUME_GUEST_NV;
                        break;
                case EMULATE_AGAIN:
                        r = RESUME_GUEST;
                        break;
                case EMULATE_FAIL:
                        printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                               __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
                        kvmppc_core_queue_program(vcpu, flags);
                        r = RESUME_GUEST;
                        break;
                case EMULATE_DO_MMIO:
                        run->exit_reason = KVM_EXIT_MMIO;
                        r = RESUME_HOST_NV;
                        break;
                case EMULATE_EXIT_USER:
                        r = RESUME_HOST_NV;
                        break;
                default:
                        BUG();
                }
                break;
        }
        case BOOK3S_INTERRUPT_SYSCALL:
                if (vcpu->arch.papr_enabled &&
                    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
                    !(vcpu->arch.shared->msr & MSR_PR)) {
                        /* SC 1 papr hypercalls */
                        ulong cmd = kvmppc_get_gpr(vcpu, 3);
                        int i;

#ifdef CONFIG_KVM_BOOK3S_64_PR
                        if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
                                r = RESUME_GUEST;
                                break;
                        }
#endif

                        run->papr_hcall.nr = cmd;
                        for (i = 0; i < 9; ++i) {
                                ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
                                run->papr_hcall.args[i] = gpr;
                        }
                        run->exit_reason = KVM_EXIT_PAPR_HCALL;
                        vcpu->arch.hcall_needed = 1;
                        r = RESUME_HOST;
                } else if (vcpu->arch.osi_enabled &&
                    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
                    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
                        /* MOL hypercalls */
                        u64 *gprs = run->osi.gprs;
                        int i;

                        run->exit_reason = KVM_EXIT_OSI;
                        for (i = 0; i < 32; i++)
                                gprs[i] = kvmppc_get_gpr(vcpu, i);
                        vcpu->arch.osi_needed = 1;
                        r = RESUME_HOST_NV;
                } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        vcpu->stat.syscall_exits++;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        case BOOK3S_INTERRUPT_FP_UNAVAIL:
        case BOOK3S_INTERRUPT_ALTIVEC:
        case BOOK3S_INTERRUPT_VSX:
        {
                int ext_msr = 0;

                switch (exit_nr) {
                case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
                case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
                case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
                }

                switch (kvmppc_check_ext(vcpu, exit_nr)) {
                case EMULATE_DONE:
                        /* everything ok - let's enable the ext */
                        r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
                        break;
                case EMULATE_FAIL:
                        /* we need to emulate this instruction */
                        goto program_interrupt;
                        break;
                default:
                        /* nothing to worry about - go again */
                        break;
                }
                break;
        }
        case BOOK3S_INTERRUPT_ALIGNMENT:
                if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
                        vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
                                kvmppc_get_last_inst(vcpu));
                        vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
                                kvmppc_get_last_inst(vcpu));
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_MACHINE_CHECK:
        case BOOK3S_INTERRUPT_TRACE:
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                r = RESUME_GUEST;
                break;
        default:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                /* Ugh - bork here! What did we get? */
                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
                        exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
                r = RESUME_HOST;
                BUG();
                break;
        }
        }

        if (!(r & RESUME_HOST)) {
                /* To avoid clobbering exit_reason, only check for signals if
                 * we aren't already exiting to userspace for some other
                 * reason. */

                /*
                 * Interrupts could be timers for the guest which we have to
                 * inject again, so let's postpone them until we're in the
                 * guest and if we really did time things so badly, then we
                 * just exit again due to a host external interrupt.
                 */
                local_irq_disable();
                s = kvmppc_prepare_to_enter(vcpu);
                if (s <= 0) {
                        local_irq_enable();
                        r = s;
                } else {
                        kvmppc_fix_ee_before_entry();
                }
                kvmppc_handle_lost_ext(vcpu);
        }

        trace_kvm_book3s_reenter(r, vcpu);

        return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
                                            struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        sregs->pvr = vcpu->arch.pvr;

        sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
                        sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
                }
        } else {
                for (i = 0; i < 16; i++)
                        sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

                for (i = 0; i < 8; i++) {
                        sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
                        sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
                }
        }

        return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
                                            struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        kvmppc_set_pvr_pr(vcpu, sregs->pvr);

        vcpu3s->sdr1 = sregs->u.s.sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
                                                    sregs->u.s.ppc64.slb[i].slbe);
                }
        } else {
                for (i = 0; i < 16; i++) {
                        vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
                }
                for (i = 0; i < 8; i++) {
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
                                       (u32)sregs->u.s.ppc32.ibat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
                                       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
                                       (u32)sregs->u.s.ppc32.dbat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
                                       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
                }
        }

        /* Flush the MMU after messing with the segments */
        kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return 0;
}

static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
                                 union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                *val = get_reg_val(id, to_book3s(vcpu)->hior);
                break;
#ifdef CONFIG_VSX
        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
                long int i = id - KVM_REG_PPC_VSR0;

                if (!cpu_has_feature(CPU_FTR_VSX)) {
                        r = -ENXIO;
                        break;
                }
                val->vsxval[0] = vcpu->arch.fpr[i];
                val->vsxval[1] = vcpu->arch.vsr[i];
                break;
        }
#endif /* CONFIG_VSX */
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
                                 union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                to_book3s(vcpu)->hior = set_reg_val(id, *val);
                to_book3s(vcpu)->hior_explicit = true;
                break;
#ifdef CONFIG_VSX
        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
                long int i = id - KVM_REG_PPC_VSR0;

                if (!cpu_has_feature(CPU_FTR_VSX)) {
                        r = -ENXIO;
                        break;
                }
                vcpu->arch.fpr[i] = val->vsxval[0];
                vcpu->arch.vsr[i] = val->vsxval[1];
                break;
        }
#endif /* CONFIG_VSX */
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

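/*
 * Allocate and wire up a PR vcpu: the vcpu itself from the slab cache,
 * the book3s state via vzalloc, the 32-bit shadow vcpu where configured,
 * and one zeroed page whose last 4k bytes hold the guest-visible shared
 * page.
 */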
static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
                                                   unsigned int id)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s;
        struct kvm_vcpu *vcpu;
        int err = -ENOMEM;
        unsigned long p;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
        if (!vcpu_book3s)
                goto free_vcpu;
        vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32
        vcpu->arch.shadow_vcpu =
                kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
        if (!vcpu->arch.shadow_vcpu)
                goto free_vcpu3s;
#endif

        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_shadow_vcpu;

        err = -ENOMEM;
        p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
        if (!p)
                goto uninit_vcpu;
        /* the real shared page fills the last 4k of our page */
        vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);

#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * Default to the same as the host if we're on a sufficiently
         * recent machine that we have 1TB segments;
         * otherwise default to PPC970FX.
         */
        vcpu->arch.pvr = 0x3C0301;
        if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                vcpu->arch.pvr = mfspr(SPRN_PVR);
#else
        /* default to book3s_32 (750) */
        vcpu->arch.pvr = 0x84202;
#endif
        kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
        vcpu->arch.slb_nr = 64;

        vcpu->arch.shadow_msr = MSR_USER64;

        err = kvmppc_mmu_init(vcpu);
        if (err < 0)
                goto uninit_vcpu;

        return vcpu;

uninit_vcpu:
        kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32
        kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
        vfree(vcpu_book3s);
free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
        kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32
        kfree(vcpu->arch.shadow_vcpu);
#endif
        vfree(vcpu_book3s);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

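/*
 * Run loop wrapper: saves the host's FP/Altivec/VSX state on the stack,
 * preloads guest FP state if the guest MSR enables it, enters the guest
 * via __kvmppc_vcpu_run(), and restores the host state afterwards.
 */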
static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        int ret;
        double fpr[32][TS_FPRWIDTH];
        unsigned int fpscr;
        int fpexc_mode;
#ifdef CONFIG_ALTIVEC
        vector128 vr[32];
        vector128 vscr;
        unsigned long uninitialized_var(vrsave);
        int used_vr;
#endif
#ifdef CONFIG_VSX
        int used_vsr;
#endif
        ulong ext_msr;

        /* Check if we can run the vcpu at all */
        if (!vcpu->arch.sane) {
                kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = -EINVAL;
                goto out;
        }

        /*
         * Interrupts could be timers for the guest which we have to inject
         * again, so let's postpone them until we're in the guest and if we
         * really did time things so badly, then we just exit again due to
         * a host external interrupt.
         */
        local_irq_disable();
        ret = kvmppc_prepare_to_enter(vcpu);
        if (ret <= 0) {
                local_irq_enable();
                goto out;
        }

        /* Save FPU state in stack */
        if (current->thread.regs->msr & MSR_FP)
                giveup_fpu(current);
        memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
        fpscr = current->thread.fpscr.val;
        fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
        /* Save Altivec state in stack */
        used_vr = current->thread.used_vr;
        if (used_vr) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
                vscr = current->thread.vscr;
                vrsave = current->thread.vrsave;
        }
#endif

#ifdef CONFIG_VSX
        /* Save VSX state in stack */
        used_vsr = current->thread.used_vsr;
        if (used_vsr && (current->thread.regs->msr & MSR_VSX))
                __giveup_vsx(current);
#endif

        /* Remember the MSR with disabled extensions */
        ext_msr = current->thread.regs->msr;

        /* Preload FPU if it's enabled */
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

        kvmppc_fix_ee_before_entry();

        ret = __kvmppc_vcpu_run(kvm_run, vcpu);

        /* No need for kvm_guest_exit. It's done in handle_exit.
           We also get here with interrupts enabled. */

        /* Make sure we save the guest FPU/Altivec/VSX state */
        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

        current->thread.regs->msr = ext_msr;

        /* Restore FPU/VSX state from stack */
        memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
        current->thread.fpscr.val = fpscr;
        current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
        /* Restore Altivec state from stack */
        if (used_vr && current->thread.used_vr) {
                memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
                current->thread.vscr = vscr;
                current->thread.vrsave = vrsave;
        }
        current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
        current->thread.used_vsr = used_vsr;
#endif

out:
        vcpu->mode = OUTSIDE_GUEST_MODE;
        return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
                                         struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        struct kvm_vcpu *vcpu;
        ulong ga, ga_end;
        int is_dirty = 0;
        int r;
        unsigned long n;

        mutex_lock(&kvm->slots_lock);

        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
                memslot = id_to_memslot(kvm->memslots, log->slot);

                ga = memslot->base_gfn << PAGE_SHIFT;
                ga_end = ga + (memslot->npages << PAGE_SHIFT);

                kvm_for_each_vcpu(n, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }

        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
                                         struct kvm_memory_slot *memslot)
{
        return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot,
                                        struct kvm_userspace_memory_region *mem)
{
        return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old)
{
        return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
                                        struct kvm_memory_slot *dont)
{
        return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
                                         unsigned long npages)
{
        return 0;
}


#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
                                         struct kvm_ppc_smmu_info *info)
{
        long int i;
        struct kvm_vcpu *vcpu;

        info->flags = 0;

        /* SLB is always 64 entries */
        info->slb_size = 64;

        /* Standard 4k base page size segment */
        info->sps[0].page_shift = 12;
        info->sps[0].slb_enc = 0;
        info->sps[0].enc[0].page_shift = 12;
        info->sps[0].enc[0].pte_enc = 0;

        /*
         * 64k large page size.
         * We only want to put this in if the CPUs we're emulating
         * support it, but unfortunately we don't have a vcpu easily
         * to hand here to test. Just pick the first vcpu, and if
         * that doesn't exist yet, report the minimum capability,
         * i.e., no 64k pages.
         * 1T segment support goes along with 64k pages.
         */
        i = 1;
        vcpu = kvm_get_vcpu(kvm, 0);
        if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
                info->flags = KVM_PPC_1T_SEGMENTS;
                info->sps[i].page_shift = 16;
                info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
                info->sps[i].enc[0].page_shift = 16;
                info->sps[i].enc[0].pte_enc = 1;
                ++i;
        }

        /* Standard 16M large page size segment */
        info->sps[i].page_shift = 24;
        info->sps[i].slb_enc = SLB_VSID_L;
        info->sps[i].enc[0].page_shift = 24;
        info->sps[i].enc[0].pte_enc = 0;

        return 0;
}
#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
                                         struct kvm_ppc_smmu_info *info)
{
        /* We should not get called */
        BUG();
}
#endif /* CONFIG_PPC64 */

static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
        mutex_init(&kvm->arch.hpt_mutex);

        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                spin_lock(&kvm_global_user_count_lock);
                if (++kvm_global_user_count == 1)
                        pSeries_disable_reloc_on_exc();
                spin_unlock(&kvm_global_user_count_lock);
        }
        return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
        WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                spin_lock(&kvm_global_user_count_lock);
                BUG_ON(kvm_global_user_count == 0);
                if (--kvm_global_user_count == 0)
                        pSeries_enable_reloc_on_exc();
                spin_unlock(&kvm_global_user_count_lock);
        }
}

static int kvmppc_core_check_processor_compat_pr(void)
{
        /* we are always compatible */
        return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
                                 unsigned int ioctl, unsigned long arg)
{
        return -ENOTTY;
}

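/*
 * Dispatch table handed to the generic book3s code: every architecture
 * callback for the PR (problem-state) flavour of book3s KVM lives here,
 * mirroring the table that the HV flavour registers.
 */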
static struct kvmppc_ops kvmppc_pr_ops = {
        .is_hv_enabled = false,
        .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
        .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
        .get_one_reg = kvmppc_get_one_reg_pr,
        .set_one_reg = kvmppc_set_one_reg_pr,
        .vcpu_load = kvmppc_core_vcpu_load_pr,
        .vcpu_put = kvmppc_core_vcpu_put_pr,
        .set_msr = kvmppc_set_msr_pr,
        .vcpu_run = kvmppc_vcpu_run_pr,
        .vcpu_create = kvmppc_core_vcpu_create_pr,
        .vcpu_free = kvmppc_core_vcpu_free_pr,
        .check_requests = kvmppc_core_check_requests_pr,
        .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
        .flush_memslot = kvmppc_core_flush_memslot_pr,
        .prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
        .commit_memory_region = kvmppc_core_commit_memory_region_pr,
        .unmap_hva = kvm_unmap_hva_pr,
        .unmap_hva_range = kvm_unmap_hva_range_pr,
        .age_hva = kvm_age_hva_pr,
        .test_age_hva = kvm_test_age_hva_pr,
        .set_spte_hva = kvm_set_spte_hva_pr,
        .mmu_destroy = kvmppc_mmu_destroy_pr,
        .free_memslot = kvmppc_core_free_memslot_pr,
        .create_memslot = kvmppc_core_create_memslot_pr,
        .init_vm = kvmppc_core_init_vm_pr,
        .destroy_vm = kvmppc_core_destroy_vm_pr,
        .check_processor_compat = kvmppc_core_check_processor_compat_pr,
        .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
        .emulate_op = kvmppc_core_emulate_op_pr,
        .emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
        .emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
        .fast_vcpu_kick = kvm_vcpu_kick,
        .arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
};

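/*
 * Module init/exit: register the PR callbacks with the generic KVM core;
 * kvmppc_mmu_hpte_sysinit()/sysexit() are assumed here to set up and tear
 * down the cache used for tracking shadow HPTEs.
 */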
static int kvmppc_book3s_init_pr(void)
{
        int r;

        r = kvm_init(&kvmppc_pr_ops, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

        if (r)
                return r;

        r = kvmppc_mmu_hpte_sysinit();

        return r;
}

static void kvmppc_book3s_exit_pr(void)
{
        kvmppc_mmu_hpte_sysexit();
        kvm_exit();
}

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);