/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu_put(svcpu);
#endif

	/* Disable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif
}
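/*
 * Note (explanatory, not from the original source): with AIL (Alternate
 * Interrupt Location) enabled, interrupts are delivered to the relocated
 * vectors rather than the real-mode ones that PR KVM's entry code hooks
 * into, so AIL is switched off above for the duration of guest execution
 * on this CPU and re-enabled as AIL=3 in vcpu_put below.
 */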
static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	if (svcpu->in_use)
		kvmppc_copy_from_svcpu(vcpu, svcpu);
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

	/* Enable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

	vcpu->cpu = -1;
}
/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
			  struct kvm_vcpu *vcpu)
{
	svcpu->gpr[0] = vcpu->arch.gpr[0];
	svcpu->gpr[1] = vcpu->arch.gpr[1];
	svcpu->gpr[2] = vcpu->arch.gpr[2];
	svcpu->gpr[3] = vcpu->arch.gpr[3];
	svcpu->gpr[4] = vcpu->arch.gpr[4];
	svcpu->gpr[5] = vcpu->arch.gpr[5];
	svcpu->gpr[6] = vcpu->arch.gpr[6];
	svcpu->gpr[7] = vcpu->arch.gpr[7];
	svcpu->gpr[8] = vcpu->arch.gpr[8];
	svcpu->gpr[9] = vcpu->arch.gpr[9];
	svcpu->gpr[10] = vcpu->arch.gpr[10];
	svcpu->gpr[11] = vcpu->arch.gpr[11];
	svcpu->gpr[12] = vcpu->arch.gpr[12];
	svcpu->gpr[13] = vcpu->arch.gpr[13];
	svcpu->cr  = vcpu->arch.cr;
	svcpu->xer = vcpu->arch.xer;
	svcpu->ctr = vcpu->arch.ctr;
	svcpu->lr  = vcpu->arch.lr;
	svcpu->pc  = vcpu->arch.pc;
#ifdef CONFIG_PPC_BOOK3S_64
	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
	/*
	 * Now also save the current time base value. We use this
	 * to find the guest purr and spurr value.
	 */
	vcpu->arch.entry_tb = get_tb();
	vcpu->arch.entry_vtb = get_vtb();
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.entry_ic = mfspr(SPRN_IC);
	svcpu->in_use = true;
}
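/*
 * Explanatory note: the shadow vcpu mirrors exactly the state the
 * real-mode entry/exit code touches - the volatile GPRs r0-r13 plus CR,
 * XER, CTR, LR and PC. Keeping that subset in a separate structure lets
 * the assembly paths run without address translation while the full
 * vcpu remains an ordinary kernel object.
 */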
/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
			    struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	/*
	 * vcpu_put would just call us again because in_use hasn't
	 * been updated yet.
	 */
	preempt_disable();

	/*
	 * Maybe we were already preempted and synced the svcpu from
	 * our preempt notifiers. Don't bother touching this svcpu then.
	 */
	if (!svcpu->in_use)
		goto out;

	vcpu->arch.gpr[0] = svcpu->gpr[0];
	vcpu->arch.gpr[1] = svcpu->gpr[1];
	vcpu->arch.gpr[2] = svcpu->gpr[2];
	vcpu->arch.gpr[3] = svcpu->gpr[3];
	vcpu->arch.gpr[4] = svcpu->gpr[4];
	vcpu->arch.gpr[5] = svcpu->gpr[5];
	vcpu->arch.gpr[6] = svcpu->gpr[6];
	vcpu->arch.gpr[7] = svcpu->gpr[7];
	vcpu->arch.gpr[8] = svcpu->gpr[8];
	vcpu->arch.gpr[9] = svcpu->gpr[9];
	vcpu->arch.gpr[10] = svcpu->gpr[10];
	vcpu->arch.gpr[11] = svcpu->gpr[11];
	vcpu->arch.gpr[12] = svcpu->gpr[12];
	vcpu->arch.gpr[13] = svcpu->gpr[13];
	vcpu->arch.cr  = svcpu->cr;
	vcpu->arch.xer = svcpu->xer;
	vcpu->arch.ctr = svcpu->ctr;
	vcpu->arch.lr  = svcpu->lr;
	vcpu->arch.pc  = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar   = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
	/*
	 * Update purr and spurr using time base on exit.
	 */
	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.vtb += get_vtb() - vcpu->arch.entry_vtb;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
	svcpu->in_use = false;

out:
	preempt_enable();
}
static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}
/************* MMU Notifiers *************/
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}
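/*
 * Worked example of the interval math above: a memslot with
 * userspace_addr 0x10000000 and base_gfn 0x100 asked to unmap
 * [0x10003000, 0x10004000) yields gfn = 0x103 and gfn_end = 0x104, so
 * exactly the one guest page overlapping the hva range has its shadow
 * PTEs flushed on every vcpu.
 */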
static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);

	return 0;
}
static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}
static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}
static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong guest_msr = kvmppc_get_msr(vcpu);
	ulong smsr = guest_msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest reserved */
	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}
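/*
 * Example of the computation above: a guest kernel running with
 * MSR_SF|MSR_ME gets a shadow MSR of
 * MSR_SF|MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_PR|MSR_EE (plus MSR_HV|MSR_ISF
 * on 64-bit hosts) - i.e. guest "supervisor" code still executes as a
 * translated, problem-state process on the host, which is the essence
 * of PR KVM.
 */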
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = kvmppc_get_msr(vcpu);

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			kvmppc_set_msr_fast(vcpu, msr);
		}
	}

	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around, we need to flush it. Typically 32-bit magic
	 * page will be instantiated when calling into RTAS. Note: We
	 * assume that such transition only happens while in kernel mode,
	 * ie, we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}
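/*
 * Explanatory note: the "magic page" handled above is the KVM paravirt
 * shared page - the guest maps it via an ePAPR hypercall and exchanges
 * interrupt/MSR state through it instead of trapping on every access,
 * which is why its segment mapping is kept preloaded whenever the guest
 * runs in kernel mode.
 */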
void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as 32 bytes store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}
/* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
 * emulate 32 bytes dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special bit
 * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
			page[i] &= cpu_to_be32(0xfffffff7);

	kunmap_atomic(page);
	put_page(hpage);
}
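/*
 * Explanatory note on the mask math above: 0xff0007ff keeps the primary
 * and extended opcode bits of dcbz (INS_DCBZ = 0x7c0007ec); clearing
 * bit 0x8 of the low word turns the instruction into a reserved
 * encoding, so every patched dcbz now raises a program interrupt that
 * the emulator recognises and executes with the 32-byte line length the
 * guest expects.
 */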
static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	if (unlikely(mp_pa) &&
	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
		return 1;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
	}

	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		u64 msr = kvmppc_get_msr(vcpu);
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		u32 dsisr = vcpu->arch.fault_dsisr;
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		u64 msr = kvmppc_get_msr(vcpu);
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
		kvmppc_set_dsisr(vcpu, dsisr);
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte, iswrite);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if ( r == RESUME_HOST_NV )
			r = RESUME_HOST;
	}

	return r;
}
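/*
 * Dispatch summary for the fault handler above: -ENOENT, -EPERM and
 * -EINVAL from the guest MMU are reflected back into the guest as
 * DSI/ISI or segment interrupts, faults on visible RAM get a host HPTE
 * via kvmppc_mmu_map_page(), and everything else is treated as MMIO and
 * handed to the instruction emulator.
 */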
static inline int get_fpr_index(int i)
{
	return i * TS_FPRWIDTH;
}
/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fp_state.fpr[].
		 */
		if (t->regs->msr & MSR_FP)
			giveup_fpu(current);
		t->fp_save_area = NULL;
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		t->vr_save_area = NULL;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}
/* Give up facility (TAR / EBB / DSCR) */
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
		/* Facility not available to the guest, ignore giveup request */
		return;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		vcpu->arch.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, current->thread.tar);
		vcpu->arch.shadow_fscr &= ~FSCR_TAR;
		break;
	}
#endif
}
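/*
 * Explanatory note: FSCR_TAR_LG is the bit *position* of the TAR
 * facility in the FSCR. On a facility-unavailable interrupt the
 * hardware reports the failing facility in the top byte (the IC field)
 * of FSCR, which is why the facility code below shifts by 56 when
 * injecting or decoding the interrupt cause.
 */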
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		ulong msr = kvmppc_get_msr(vcpu);

		msr = kvmppc_set_field(msr, 33, 33, 1);
		msr = kvmppc_set_field(msr, 34, 36, 0);
		msr = kvmppc_set_field(msr, 42, 47, 0);
		kvmppc_set_msr_fast(vcpu, msr);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}
static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(kvmppc_get_msr(vcpu) & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		t->fp_save_area = &vcpu->arch.fp;
		preempt_enable();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		t->vr_save_area = &vcpu->arch.vr;
		preempt_enable();
#endif
	}

	t->regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}
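/*
 * Explanatory note: this is classic lazy state switching - guest
 * FP/VMX/VSX registers are only loaded on the first
 * facility-unavailable trap, and guest_owned_ext records which register
 * sets currently hold guest values so that kvmppc_giveup_ext() knows
 * what needs flushing back on the way out.
 */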
/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		preempt_enable();
	}
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC) {
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		preempt_enable();
	}
#endif
	current->thread.regs->msr |= lost_ext;
}
#ifdef CONFIG_PPC_BOOK3S_64

static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
	/* Inject the Interrupt Cause field and trigger a guest interrupt */
	vcpu->arch.fscr &= ~(0xffULL << 56);
	vcpu->arch.fscr |= (fac << 56);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}
static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	enum emulation_result er = EMULATE_FAIL;

	if (!(kvmppc_get_msr(vcpu) & MSR_PR))
		er = kvmppc_emulate_instruction(vcpu->run, vcpu);

	if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
		/* Couldn't emulate, trigger interrupt in guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
	}
}
/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	bool guest_fac_enabled;
	BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

	/*
	 * Not every facility is enabled by FSCR bits, check whether the
	 * guest has this facility enabled at all.
	 */
	switch (fac) {
	case FSCR_TAR_LG:
	case FSCR_EBB_LG:
		guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
		break;
	case FSCR_TM_LG:
		guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
		break;
	default:
		guest_fac_enabled = false;
		break;
	}

	if (!guest_fac_enabled) {
		/* Facility not enabled by the guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
		return RESUME_GUEST;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		/* TAR switching isn't lazy in Linux yet */
		current->thread.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, vcpu->arch.tar);
		vcpu->arch.shadow_fscr |= FSCR_TAR;
		break;
	default:
		kvmppc_emulate_fac(vcpu, fac);
		break;
	}

	return RESUME_GUEST;
}
#endif
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			u64 msr = kvmppc_get_msr(vcpu);
			msr |= shadow_srr1 & 0x58000000;
			kvmppc_set_msr_fast(vcpu, msr);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
:
933 ulong dar
= kvmppc_get_fault_dar(vcpu
);
934 u32 fault_dsisr
= vcpu
->arch
.fault_dsisr
;
935 vcpu
->stat
.pf_storage
++;
937 #ifdef CONFIG_PPC_BOOK3S_32
938 /* We set segments as unused segments when invalidating them. So
939 * treat the respective fault as segment fault. */
941 struct kvmppc_book3s_shadow_vcpu
*svcpu
;
944 svcpu
= svcpu_get(vcpu
);
945 sr
= svcpu
->sr
[dar
>> SID_SHIFT
];
947 if (sr
== SR_INVALID
) {
948 kvmppc_mmu_map_segment(vcpu
, dar
);
956 * We need to handle missing shadow PTEs, and
957 * protection faults due to us mapping a page read-only
958 * when the guest thinks it is writable.
960 if (fault_dsisr
& (DSISR_NOHPTE
| DSISR_PROTFAULT
)) {
961 int idx
= srcu_read_lock(&vcpu
->kvm
->srcu
);
962 r
= kvmppc_handle_pagefault(run
, vcpu
, dar
, exit_nr
);
963 srcu_read_unlock(&vcpu
->kvm
->srcu
, idx
);
965 kvmppc_set_dar(vcpu
, dar
);
966 kvmppc_set_dsisr(vcpu
, fault_dsisr
);
967 kvmppc_book3s_queue_irqprio(vcpu
, exit_nr
);
972 case BOOK3S_INTERRUPT_DATA_SEGMENT
:
973 if (kvmppc_mmu_map_segment(vcpu
, kvmppc_get_fault_dar(vcpu
)) < 0) {
974 kvmppc_set_dar(vcpu
, kvmppc_get_fault_dar(vcpu
));
975 kvmppc_book3s_queue_irqprio(vcpu
,
976 BOOK3S_INTERRUPT_DATA_SEGMENT
);
980 case BOOK3S_INTERRUPT_INST_SEGMENT
:
981 if (kvmppc_mmu_map_segment(vcpu
, kvmppc_get_pc(vcpu
)) < 0) {
982 kvmppc_book3s_queue_irqprio(vcpu
,
983 BOOK3S_INTERRUPT_INST_SEGMENT
);
987 /* We're good on these - the host merely wanted to get our attention */
988 case BOOK3S_INTERRUPT_DECREMENTER
:
989 case BOOK3S_INTERRUPT_HV_DECREMENTER
:
990 case BOOK3S_INTERRUPT_DOORBELL
:
991 case BOOK3S_INTERRUPT_H_DOORBELL
:
992 vcpu
->stat
.dec_exits
++;
995 case BOOK3S_INTERRUPT_EXTERNAL
:
996 case BOOK3S_INTERRUPT_EXTERNAL_LEVEL
:
997 case BOOK3S_INTERRUPT_EXTERNAL_HV
:
998 vcpu
->stat
.ext_intr_exits
++;
1001 case BOOK3S_INTERRUPT_PERFMON
:
1004 case BOOK3S_INTERRUPT_PROGRAM
:
1005 case BOOK3S_INTERRUPT_H_EMUL_ASSIST
:
1007 enum emulation_result er
;
1011 flags
= vcpu
->arch
.shadow_srr1
& 0x1f0000ull
;
1013 if (kvmppc_get_msr(vcpu
) & MSR_PR
) {
1015 printk(KERN_INFO
"Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu
), kvmppc_get_last_inst(vcpu
));
1017 if ((kvmppc_get_last_inst(vcpu
) & 0xff0007ff) !=
1018 (INS_DCBZ
& 0xfffffff7)) {
1019 kvmppc_core_queue_program(vcpu
, flags
);
1025 vcpu
->stat
.emulated_inst_exits
++;
1026 er
= kvmppc_emulate_instruction(run
, vcpu
);
1029 r
= RESUME_GUEST_NV
;
1035 printk(KERN_CRIT
"%s: emulation at %lx failed (%08x)\n",
1036 __func__
, kvmppc_get_pc(vcpu
), kvmppc_get_last_inst(vcpu
));
1037 kvmppc_core_queue_program(vcpu
, flags
);
1040 case EMULATE_DO_MMIO
:
1041 run
->exit_reason
= KVM_EXIT_MMIO
;
1044 case EMULATE_EXIT_USER
:
1052 case BOOK3S_INTERRUPT_SYSCALL
:
1053 if (vcpu
->arch
.papr_enabled
&&
1054 (kvmppc_get_last_sc(vcpu
) == 0x44000022) &&
1055 !(kvmppc_get_msr(vcpu
) & MSR_PR
)) {
1056 /* SC 1 papr hypercalls */
1057 ulong cmd
= kvmppc_get_gpr(vcpu
, 3);
1060 #ifdef CONFIG_PPC_BOOK3S_64
1061 if (kvmppc_h_pr(vcpu
, cmd
) == EMULATE_DONE
) {
1067 run
->papr_hcall
.nr
= cmd
;
1068 for (i
= 0; i
< 9; ++i
) {
1069 ulong gpr
= kvmppc_get_gpr(vcpu
, 4 + i
);
1070 run
->papr_hcall
.args
[i
] = gpr
;
1072 run
->exit_reason
= KVM_EXIT_PAPR_HCALL
;
1073 vcpu
->arch
.hcall_needed
= 1;
1075 } else if (vcpu
->arch
.osi_enabled
&&
1076 (((u32
)kvmppc_get_gpr(vcpu
, 3)) == OSI_SC_MAGIC_R3
) &&
1077 (((u32
)kvmppc_get_gpr(vcpu
, 4)) == OSI_SC_MAGIC_R4
)) {
1078 /* MOL hypercalls */
1079 u64
*gprs
= run
->osi
.gprs
;
1082 run
->exit_reason
= KVM_EXIT_OSI
;
1083 for (i
= 0; i
< 32; i
++)
1084 gprs
[i
] = kvmppc_get_gpr(vcpu
, i
);
1085 vcpu
->arch
.osi_needed
= 1;
1087 } else if (!(kvmppc_get_msr(vcpu
) & MSR_PR
) &&
1088 (((u32
)kvmppc_get_gpr(vcpu
, 0)) == KVM_SC_MAGIC_R0
)) {
1089 /* KVM PV hypercalls */
1090 kvmppc_set_gpr(vcpu
, 3, kvmppc_kvm_pv(vcpu
));
1093 /* Guest syscalls */
1094 vcpu
->stat
.syscall_exits
++;
1095 kvmppc_book3s_queue_irqprio(vcpu
, exit_nr
);
1099 case BOOK3S_INTERRUPT_FP_UNAVAIL
:
1100 case BOOK3S_INTERRUPT_ALTIVEC
:
1101 case BOOK3S_INTERRUPT_VSX
:
1106 case BOOK3S_INTERRUPT_FP_UNAVAIL
: ext_msr
= MSR_FP
; break;
1107 case BOOK3S_INTERRUPT_ALTIVEC
: ext_msr
= MSR_VEC
; break;
1108 case BOOK3S_INTERRUPT_VSX
: ext_msr
= MSR_VSX
; break;
1111 switch (kvmppc_check_ext(vcpu
, exit_nr
)) {
1113 /* everything ok - let's enable the ext */
1114 r
= kvmppc_handle_ext(vcpu
, exit_nr
, ext_msr
);
1117 /* we need to emulate this instruction */
1118 goto program_interrupt
;
1121 /* nothing to worry about - go again */
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			u32 last_inst = kvmppc_get_last_inst(vcpu);
			u32 dsisr;
			u64 dar;

			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
			dar = kvmppc_alignment_dar(vcpu, last_inst);

			kvmppc_set_dsisr(vcpu, dsisr);
			kvmppc_set_dar(vcpu, dar);

			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case BOOK3S_INTERRUPT_FAC_UNAVAIL:
		kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
		r = RESUME_GUEST;
		break;
#endif
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}
	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = s;
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}

		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}
static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}
*vcpu
,
1220 struct kvm_sregs
*sregs
)
1222 struct kvmppc_vcpu_book3s
*vcpu3s
= to_book3s(vcpu
);
1225 kvmppc_set_pvr_pr(vcpu
, sregs
->pvr
);
1227 vcpu3s
->sdr1
= sregs
->u
.s
.sdr1
;
1228 if (vcpu
->arch
.hflags
& BOOK3S_HFLAG_SLB
) {
1229 for (i
= 0; i
< 64; i
++) {
1230 vcpu
->arch
.mmu
.slbmte(vcpu
, sregs
->u
.s
.ppc64
.slb
[i
].slbv
,
1231 sregs
->u
.s
.ppc64
.slb
[i
].slbe
);
1234 for (i
= 0; i
< 16; i
++) {
1235 vcpu
->arch
.mmu
.mtsrin(vcpu
, i
, sregs
->u
.s
.ppc32
.sr
[i
]);
1237 for (i
= 0; i
< 8; i
++) {
1238 kvmppc_set_bat(vcpu
, &(vcpu3s
->ibat
[i
]), false,
1239 (u32
)sregs
->u
.s
.ppc32
.ibat
[i
]);
1240 kvmppc_set_bat(vcpu
, &(vcpu3s
->ibat
[i
]), true,
1241 (u32
)(sregs
->u
.s
.ppc32
.ibat
[i
] >> 32));
1242 kvmppc_set_bat(vcpu
, &(vcpu3s
->dbat
[i
]), false,
1243 (u32
)sregs
->u
.s
.ppc32
.dbat
[i
]);
1244 kvmppc_set_bat(vcpu
, &(vcpu3s
->dbat
[i
]), true,
1245 (u32
)(sregs
->u
.s
.ppc32
.dbat
[i
] >> 32));
1249 /* Flush the MMU after messing with the segments */
1250 kvmppc_mmu_pte_flush(vcpu
, 0, 0);
static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
	case KVM_REG_PPC_LPCR:
		/*
		 * We are only interested in the LPCR_ILE bit
		 */
		if (vcpu->arch.intr_msr & MSR_LE)
			*val = get_reg_val(id, LPCR_ILE);
		else
			*val = get_reg_val(id, 0);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
	if (new_lpcr & LPCR_ILE)
		vcpu->arch.intr_msr |= MSR_LE;
	else
		vcpu->arch.intr_msr &= ~MSR_LE;
}
static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
	case KVM_REG_PPC_LPCR:
		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
						   unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto free_vcpu;
	vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	vcpu->arch.shadow_vcpu =
		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
	if (!vcpu->arch.shadow_vcpu)
		goto free_vcpu3s;
#endif

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	err = -ENOMEM;
	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	/* the real shared page fills the last 4k of our page */
	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
#ifdef CONFIG_PPC_BOOK3S_64
	/* Always start the shared struct in native endian mode */
#ifdef __BIG_ENDIAN__
	vcpu->arch.shared_big_endian = true;
#else
	vcpu->arch.shared_big_endian = false;
#endif

	/*
	 * Default to the same as the host if we're on sufficiently
	 * recent machine that we have 1TB segments;
	 * otherwise default to PPC970FX.
	 */
	vcpu->arch.pvr = 0x3C0301;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu->arch.intr_msr = MSR_SF;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
	vfree(vcpu_book3s);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}
static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_ALTIVEC
	unsigned long uninitialized_var(vrsave);
#endif

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0)
		goto out;
	/* interrupts now hard-disabled */

	/* Save FPU state in thread_struct */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in thread_struct */
	if (current->thread.regs->msr & MSR_VEC)
		giveup_altivec(current);
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in thread_struct */
	if (current->thread.regs->msr & MSR_VSX)
		__giveup_vsx(current);
#endif

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

	/* Make sure we save the guest TAR/EBB/DSCR state */
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}
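/*
 * Explanatory note: the net effect of one KVM_RUN pass above is that
 * host FP/Altivec/VSX state is flushed to the thread_struct, pending
 * interrupts are injected, and __kvmppc_vcpu_run() switches to the
 * shadow MSR context until the next trap comes back in through
 * kvmppc_handle_exit_pr().
 */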
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = id_to_memslot(kvm->memslots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
					 struct kvm_memory_slot *memslot)
{
	return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
					struct kvm_memory_slot *memslot,
					struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old)
{
	return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	return 0;
}
#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to put this in if the CPUs we're emulating
	 * support it, but unfortunately we don't have a vcpu easily
	 * to hand here to test.  Just pick the first vcpu, and if
	 * that doesn't exist yet, report the minimum capability,
	 * i.e., no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		++i;
	}

	/* Standard 16M large page size segment */
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}
#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	/* We should not get called */
	BUG();
}
#endif /* CONFIG_PPC64 */
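/*
 * Explanatory note: the sps[] table built above is what userspace
 * (e.g. QEMU) consumes via KVM_PPC_GET_SMMU_INFO - base 4k pages
 * always, plus 64k and 16M segment page sizes once the first vcpu
 * advertises BOOK3S_HFLAG_MULTI_PGSIZE.
 */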
static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);
static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hpt_mutex);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Start out with the default set of hcalls enabled */
	kvmppc_pr_init_default_hcalls(kvm);
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pSeries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}
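/*
 * Explanatory note: the H_SET_MODE dance above exists because PR KVM
 * claims the 0x0-based exception vectors - while at least one PR VM is
 * active, the host must not take its exceptions at the relocated
 * addresses, hence pSeries_disable_reloc_on_exc() on first use and the
 * matching re-enable in destroy_vm below.
 */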
static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pSeries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}
static int kvmppc_core_check_processor_compat_pr(void)
{
	/* we are always compatible */
	return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	return -ENOTTY;
}
static struct kvmppc_ops kvm_ops_pr = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
	.get_one_reg = kvmppc_get_one_reg_pr,
	.set_one_reg = kvmppc_set_one_reg_pr,
	.vcpu_load   = kvmppc_core_vcpu_load_pr,
	.vcpu_put    = kvmppc_core_vcpu_put_pr,
	.set_msr     = kvmppc_set_msr_pr,
	.vcpu_run    = kvmppc_vcpu_run_pr,
	.vcpu_create = kvmppc_core_vcpu_create_pr,
	.vcpu_free   = kvmppc_core_vcpu_free_pr,
	.check_requests = kvmppc_core_check_requests_pr,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
	.flush_memslot = kvmppc_core_flush_memslot_pr,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
	.unmap_hva = kvm_unmap_hva_pr,
	.unmap_hva_range = kvm_unmap_hva_range_pr,
	.age_hva  = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.mmu_destroy  = kvmppc_mmu_destroy_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.create_memslot = kvmppc_core_create_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
	.emulate_op = kvmppc_core_emulate_op_pr,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
	.fast_vcpu_kick = kvm_vcpu_kick,
	.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
};
int kvmppc_book3s_init_pr(void)
{
	int r;

	r = kvmppc_core_check_processor_compat_pr();
	if (r < 0)
		return r;

	kvm_ops_pr.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_pr;

	r = kvmppc_mmu_hpte_sysinit();
	return r;
}

void kvmppc_book3s_exit_pr(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_mmu_hpte_sysexit();
}
/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif