/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define __hard_irq_disable local_irq_disable
#define __hard_irq_enable local_irq_enable
#endif

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu_put(svcpu);
#endif

#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);
}

void kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

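/*
 * Note on the request handling above: kvmppc_mmu_pte_flush() with a
 * zero address and zero mask matches every shadow PTE, so servicing
 * KVM_REQ_TLB_FLUSH here amounts to dropping the entire shadow MMU
 * cache rather than performing a real TLB flush.
 */
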
/************* MMU Notifiers *************/

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere. This is slow, but
	 * we are 100% sure that we catch the to be unmapped page
	 */
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* kvm_unmap_hva flushes everything anyways */
	kvm_unmap_hva(kvm, start);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
}

/*****************************************/

static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong smsr = vcpu->arch.shared->msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest reserved */
	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}

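/*
 * Worked example for kvmppc_recalc_shadow_msr() (illustrative): if a
 * 64-bit guest runs with msr = MSR_SF|MSR_EE|MSR_PR|MSR_IR|MSR_DR and
 * owns no extensions, only MSR_SF survives the first mask; the second
 * mask then forces ME|RI|IR|DR|PR|EE, and the CONFIG_PPC_BOOK3S_64
 * block adds ISF|HV.  Whatever the guest's MSR claims, the shadow MSR
 * always keeps the vcpu in problem state with translation enabled.
 */
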
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.shared->msr = msr;
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			vcpu->arch.shared->msr = msr;
		}
	}

	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
	    (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
	}

	/* Preload magic page segment when in kernel mode */
	if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
		struct kvm_vcpu_arch *a = &vcpu->arch;

		if (msr & MSR_DR)
			kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
		else
			kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around, we need to flush it. Typically 32-bit magic
	 * page will be instantiated when calling into RTAS. Note: We
	 * assume that such transition only happens while in kernel mode,
	 * ie, we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

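/*
 * The trailing FPU preload in kvmppc_set_msr() is an optimization:
 * when the new guest MSR already has MSR_FP set, faking the
 * FP-unavailable interrupt here loads the guest FP state eagerly
 * instead of taking a real exit on the first FP instruction executed
 * after the MSR write.
 */
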
void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force-disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
 * emulate 32 bytes dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special bit
 * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;	/* convert byte offset to u32 index */

	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ)
			page[i] &= 0xfffffff7;

	kunmap_atomic(page);
	put_page(hpage);
}

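/*
 * Worked example for the patching above (illustrative): dcbz is
 * primary opcode 31 with extended opcode 1014, i.e. INS_DCBZ
 * (0x7c0007ec) plus the RA/RB register fields.  The 0xff0007ff mask
 * strips those register fields before the compare.  Clearing bit 0x8
 * changes the extended opcode to 1010, which is unallocated, so the
 * patched instruction raises a program interrupt; the exit handler
 * recognizes it by comparing against (INS_DCBZ & 0xfffffff7).
 */
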
static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(vcpu->arch.shared->msr & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	if (unlikely(mp_pa) &&
	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
		return 1;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
	}

	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

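	/*
	 * The VSID_REAL* values above tag translations that were made
	 * with MSR_IR and/or MSR_DR disabled.  Tagging the virtual page
	 * number this way keeps real-mode, split-real-mode and fully
	 * translated accesses to the same effective address from
	 * aliasing in the shadow MMU.
	 */
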
	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	    (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
		vcpu->arch.shared->msr |=
			(svcpu->shadow_srr1 & 0x00000000f8000000ULL);
		svcpu_put(svcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.shared->msr |=
			svcpu->shadow_srr1 & 0x00000000f8000000ULL;
		svcpu_put(svcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}

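/*
 * Summary of the dispatch in kvmppc_handle_pagefault(): -ENOENT (no
 * guest HPTE) and -EPERM (protection) reflect the fault back into the
 * guest, -EINVAL (no guest SLB entry) becomes the corresponding
 * segment interrupt (vec + 0x80, e.g. 0x300 -> 0x380), a visible gfn
 * gets a shadow mapping on the host, and anything else is treated as
 * MMIO and emulated.
 */
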
static inline int get_fpr_index(int i)
{
#ifdef CONFIG_VSX
	i *= 2;
#endif
	return i;
}

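/*
 * Illustration (assuming CONFIG_VSX): the kernel keeps each FPR in the
 * high doubleword of the corresponding VSR, so with thread_fpr viewed
 * as a u64 array, FPR i lives at index 2*i and the matching VSX low
 * doubleword at index 2*i + 1 - which is why the VSX paths here use
 * get_fpr_index(i) + 1.
 */
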
/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	if (!(vcpu->arch.guest_owned_ext & msr))
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	switch (msr) {
	case MSR_FP:
		giveup_fpu(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

		vcpu->arch.fpscr = t->fpscr.val;
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		giveup_altivec(current);
		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = t->vscr;
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		__giveup_vsx(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext &= ~msr;
	current->thread.regs->msr &= ~msr;
	kvmppc_recalc_shadow_msr(vcpu);
}

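/*
 * Example of the lazy switching this implements: the guest's first FP
 * instruction traps as an FP-unavailable interrupt, kvmppc_handle_ext()
 * below copies the guest FP state into current->thread and sets MSR_FP
 * in guest_owned_ext; the state only travels back through the copy
 * above when the host needs the unit again or the vcpu is put.
 */
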
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		ulong msr = vcpu->arch.shared->msr;

		msr = kvmppc_set_field(msr, 33, 33, 1);
		msr = kvmppc_set_field(msr, 34, 36, 0);
		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}

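/*
 * kvmppc_set_field() works in IBM bit numbering (bit 0 is the MSB of
 * the 64-bit word), so setting bit 33 while clearing bits 34-36 and
 * 42-47 builds the SRR1 status of a "no HPTE found" instruction
 * storage interrupt - the same 0x40000000 condition the exit handler
 * tests for below.
 */
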
static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(vcpu->arch.shared->msr & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	/* We already own the ext */
	if (vcpu->arch.guest_owned_ext & msr) {
		return RESUME_GUEST;
	}

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	current->thread.regs->msr |= msr;

	switch (msr) {
	case MSR_FP:
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];

		t->fpscr.val = vcpu->arch.fpscr;
		kvmppc_load_up_fpu();
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		t->vscr = vcpu->arch.vscr;
		kvmppc_load_up_altivec();
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
		kvmppc_load_up_vsx();
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext |= msr;

	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=0, so enable it to be a nice citizen */
	__hard_irq_enable();

	trace_kvm_exit(exit_nr, vcpu);

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong shadow_srr1 = svcpu->shadow_srr1;
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
			r = RESUME_GUEST;
			svcpu_put(svcpu);
			break;
		}
#endif
		svcpu_put(svcpu);

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 * so we can't use the NX bit inside the guest. Let's cross our fingers,
			 * that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
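	/*
	 * The 0x58000000 mask above copies SRR1 bits 33, 35 and 36 (IBM
	 * numbering) - the ISI status bits describing why the fetch
	 * failed - into the guest MSR, from where they are transferred
	 * into the guest's SRR1 when the interrupt is delivered.
	 */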
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		u32 fault_dsisr = svcpu->fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, dar);
			r = RESUME_GUEST;
			svcpu_put(svcpu);
			break;
		}
#endif
		svcpu_put(svcpu);

		/* The only case we need to handle is missing shadow PTEs */
		if (fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
		} else {
			vcpu->arch.shared->dar = dar;
			vcpu->arch.shared->dsisr = fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
	{
		enum emulation_result er;
		struct kvmppc_book3s_shadow_vcpu *svcpu;
		ulong flags;

program_interrupt:
		svcpu = svcpu_get(vcpu);
		flags = svcpu->shadow_srr1 & 0x1f0000ull;
		svcpu_put(svcpu);

		if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
		if (vcpu->arch.papr_enabled &&
		    (kvmppc_get_last_inst(vcpu) == 0x44000022) &&
		    !(vcpu->arch.shared->msr & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_KVM_BOOK3S_64_PR
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
		default:
			/* nothing to worry about - go again */
			r = RESUME_GUEST;
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
				kvmppc_get_last_inst(vcpu));
			vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
				kvmppc_get_last_inst(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong shadow_srr1 = svcpu->shadow_srr1;
		svcpu_put(svcpu);
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		__hard_irq_disable();
		if (kvmppc_prepare_to_enter(vcpu)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		}
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
		r = copy_to_user((u64 __user *)(long)reg->addr,
				 &to_book3s(vcpu)->hior, sizeof(u64));
		break;
	}

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
		r = copy_from_user(&to_book3s(vcpu)->hior,
				   (u64 __user *)(long)reg->addr, sizeof(u64));
		if (!r)
			to_book3s(vcpu)->hior_explicit = true;
		break;
	}

	return r;
}

int kvmppc_core_check_processor_compat(void)
{
	return 0;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto out;

	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
	if (!vcpu_book3s->shadow_vcpu)
		goto free_vcpu;

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	/* the real shared page fills the last 4k of our page */
	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
	if (!p)
		goto uninit_vcpu;

#ifdef CONFIG_PPC_BOOK3S_64
	/* default to book3s_64 (970fx) */
	vcpu->arch.pvr = 0x3C0301;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
	kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
	vfree(vcpu_book3s);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu_book3s->shadow_vcpu);
	vfree(vcpu_book3s);
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	double fpr[32][TS_FPRWIDTH];
	unsigned int fpscr;
	int fpexc_mode;
#ifdef CONFIG_ALTIVEC
	vector128 vr[32];
	vector128 vscr;
	unsigned long uninitialized_var(vrsave);
	int used_vr;
#endif
#ifdef CONFIG_VSX
	int used_vsr;
#endif
	ulong ext_msr;

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	__hard_irq_disable();
	if (kvmppc_prepare_to_enter(vcpu)) {
		__hard_irq_enable();
		kvm_run->exit_reason = KVM_EXIT_INTR;
		ret = -EINTR;
		goto out;
	}

	/* Save FPU state in stack */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in stack */
	used_vr = current->thread.used_vr;
	if (used_vr) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
		vscr = current->thread.vscr;
		vrsave = current->thread.vrsave;
	}
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in stack */
	used_vsr = current->thread.used_vsr;
	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
		__giveup_vsx(current);
#endif

	/* Remember the MSR with disabled extensions */
	ext_msr = current->thread.regs->msr;

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	current->thread.regs->msr = ext_msr;

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	/* Restore FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Restore Altivec state from stack */
	if (used_vr && current->thread.used_vr) {
		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
		current->thread.vscr = vscr;
		current->thread.vrsave = vrsave;
	}
	current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
	current->thread.used_vsr = used_vsr;
#endif

out:
	return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = id_to_memslot(kvm->memslots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

#ifdef CONFIG_PPC64
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/* Standard 16M large page size segment */
	info->sps[1].page_shift = 24;
	info->sps[1].slb_enc = SLB_VSID_L;
	info->sps[1].enc[0].page_shift = 24;
	info->sps[1].enc[0].pte_enc = 0;

	return 0;
}
#endif /* CONFIG_PPC64 */

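/*
 * Example of how userspace reads the info above: the 16M segment is
 * advertised with sps[1].page_shift == 24 and slb_enc == SLB_VSID_L,
 * meaning an SLB entry for such a segment needs the L bit set, while
 * pte_enc == 0 says the page-size encoding stored in the HPTE low
 * word is zero.
 */
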
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);

	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
		     THIS_MODULE);
	if (r)
		return r;

	r = kvmppc_mmu_hpte_sysinit();

	return r;
}

static void kvmppc_book3s_exit(void)
{
	kvmppc_mmu_hpte_sysexit();
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);