/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
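
/*
 * A vcpu is considered runnable when it is not stopped in wait state
 * (MSR_WE clear) or when it has pending exceptions to deliver.
 */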
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !(v->arch.shared->msr & MSR_WE) ||
	       !!(v->arch.pending_exceptions);
}
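
/*
 * Handle a KVM paravirtual hypercall: the hcall number arrives in r11
 * and up to four arguments in r3-r6; the return status goes back to
 * the guest in r3 (via the caller), with r4 carrying a second return
 * value for hcalls that have one.
 */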
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode: only the low word of each argument counts */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r = HC_EV_SUCCESS;
		break;
	case HC_VENDOR_KVM | KVM_HC_FEATURES:
		r = HC_EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) /* XXX Missing magic page on BookE */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		kvmppc_set_gpr(vcpu, 4, r2);
		break;
	default:
		r = HC_EV_UNIMPLEMENTED;
		break;
	}

	return r;
}
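
/*
 * Run the instruction emulator on the faulting instruction and map
 * the emulation result onto a RESUME_* action for the caller's exit
 * handling.
 */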
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;

	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		return ERR_PTR(-ENOMEM);

	return kvm;
}
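
/*
 * Free all vcpus, then clear the vcpu table and the online count
 * under kvm->lock so teardown does not race with vcpu lookup.
 */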
static void kvmppc_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvmppc_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm);
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_PPC_OSI:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
	return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu))
		kvmppc_create_vcpu_debugfs(vcpu, id);
	return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}
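
/*
 * Tasklet half of the decrementer: queue a decrementer exception for
 * the guest and wake the vcpu if it is sleeping on its wait queue.
 */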
static void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_core_queue_dec(vcpu);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}
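
/*
 * Complete a guest load that exited to userspace for MMIO: fetch the
 * bytes userspace placed in run->mmio, correct for endianness and
 * optional sign extension, then write the value into the register
 * the emulated instruction targeted (GPR, FPR or QPR).
 */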
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
	case KVM_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
	case KVM_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}
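
/*
 * Describe the load in run->mmio and record which register the data
 * belongs in; the exit to userspace performs the access, and
 * kvmppc_complete_mmio_load() finishes the instruction afterwards.
 */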
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
	vcpu->arch.mmio_sign_extend = 1;

	return r;
}
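
/*
 * Mirror image of kvmppc_handle_load() for stores: the value is
 * byte-swapped into run->mmio.data as needed, and userspace performs
 * the actual write on exit.
 */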
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}
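
/*
 * Main KVM_RUN entry point: complete whatever MMIO, DCR or OSI work
 * userspace just finished, deliver pending interrupts, then reenter
 * the guest with the vcpu's signal mask in place.
 */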
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	}

	kvmppc_core_deliver_interrupts(vcpu);

	local_irq_disable();
	kvm_guest_enter();
	r = __kvmppc_vcpu_run(run, vcpu);
	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
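
/*
 * Queue or (for KVM_INTERRUPT_UNSET) dequeue an external interrupt
 * and wake the vcpu if it is waiting.
 */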
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET)
		kvmppc_core_dequeue_external(vcpu, irq);
	else
		kvmppc_core_queue_external(vcpu, irq);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		break;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	default:
		r = -EINVAL;
	}

out:
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}