/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "kvm.h"
#include "x86_emulate.h"
#include "segment_descriptor.h"

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>

#include <asm/processor.h>
#include <asm/uaccess.h>
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static DEFINE_SPINLOCK(kvm_lock);
static LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kvm_x86_ops *kvm_x86_ops;
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;
#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)

static struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        struct dentry *dentry;
} debugfs_entries[] = {
        { "pf_fixed", STAT_OFFSET(pf_fixed) },
        { "pf_guest", STAT_OFFSET(pf_guest) },
        { "tlb_flush", STAT_OFFSET(tlb_flush) },
        { "invlpg", STAT_OFFSET(invlpg) },
        { "exits", STAT_OFFSET(exits) },
        { "io_exits", STAT_OFFSET(io_exits) },
        { "mmio_exits", STAT_OFFSET(mmio_exits) },
        { "signal_exits", STAT_OFFSET(signal_exits) },
        { "irq_window", STAT_OFFSET(irq_window_exits) },
        { "halt_exits", STAT_OFFSET(halt_exits) },
        { "halt_wakeup", STAT_OFFSET(halt_wakeup) },
        { "request_irq", STAT_OFFSET(request_irq_exits) },
        { "irq_exits", STAT_OFFSET(irq_exits) },
        { "light_exits", STAT_OFFSET(light_exits) },
        { "efer_reload", STAT_OFFSET(efer_reload) },
        { NULL }
};

static struct dentry *debugfs_dir;
#define CR0_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
                          | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
#define EFER_RESERVED_BITS 0xfffffffffffff2fe
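
/*
 * Illustrative example (not from the original source): a guest write of
 * 0x80050033 (PG|AM|WP|NE|ET|MP|PE) to CR0 has no bits inside
 * CR0_RESERVED_BITS and is accepted by set_cr0() below, while any bit
 * outside the allowed mask -- e.g. bit 28 -- makes
 * (cr0 & CR0_RESERVED_BITS) non-zero and causes a #GP to be injected
 * instead.
 */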
#ifdef CONFIG_X86_64
/* LDT or TSS descriptor in the GDT. 16 bytes. */
struct segment_descriptor_64 {
        struct segment_descriptor s;
        u32 base_higher;
        u32 pad_zero;
};
#endif
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);
unsigned long segment_base(u16 selector)
{
        struct descriptor_table gdt;
        struct segment_descriptor *d;
        unsigned long table_base;
        unsigned long v;

        asm("sgdt %0" : "=m"(gdt));
        table_base = gdt.base;

        if (selector & 4) {           /* from ldt */
                u16 ldt_selector;

                asm("sldt %0" : "=g"(ldt_selector));
                table_base = segment_base(ldt_selector);
        }
        d = (struct segment_descriptor *)(table_base + (selector & ~7));
        v = d->base_low | ((unsigned long)d->base_mid << 16) |
                ((unsigned long)d->base_high << 24);
#ifdef CONFIG_X86_64
        if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
                v |= ((unsigned long) \
                      ((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
        return v;
}
EXPORT_SYMBOL_GPL(segment_base);
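
/*
 * Usage sketch (the selector value is hypothetical): bits 3..15 of a
 * selector index the descriptor table and bit 2 selects the LDT, so
 *
 *      unsigned long base = segment_base(0x10);        (GDT entry 2)
 *
 * returns the base assembled from base_low/base_mid/base_high, extended
 * to 64 bits for system descriptors on x86-64.
 */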
static inline int valid_vcpu(int n)
{
        return likely(n >= 0 && n < KVM_MAX_VCPUS);
}
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
        if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
                return;

        vcpu->guest_fpu_loaded = 1;
        fx_save(&vcpu->host_fx_image);
        fx_restore(&vcpu->guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
        if (!vcpu->guest_fpu_loaded)
                return;

        vcpu->guest_fpu_loaded = 0;
        fx_save(&vcpu->guest_fx_image);
        fx_restore(&vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
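
/*
 * Sketch of the intended pairing (illustrative, not lifted from the
 * original source): guest FPU state is made live just before the vcpu
 * runs and host state is restored once it stops, e.g.
 *
 *      kvm_load_guest_fpu(vcpu);
 *      ... run guest code that may touch the FPU ...
 *      kvm_put_guest_fpu(vcpu);
 *
 * The guest_fpu_loaded flag makes both calls idempotent.
 */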
/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu;

        mutex_lock(&vcpu->mutex);
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
}
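
/*
 * Minimal usage sketch (illustrative): every ioctl that touches per-vcpu
 * state brackets its work with this pair:
 *
 *      vcpu_load(vcpu);        (takes vcpu->mutex, binds to this cpu)
 *      ... access vcpu registers, mmu state, ...
 *      vcpu_put(vcpu);         (unbinds and drops vcpu->mutex)
 */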
static void ack_flush(void *_completed)
{
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        int i, cpu;
        cpumask_t cpus;
        struct kvm_vcpu *vcpu;

        cpus_clear(cpus);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
                        continue;
                if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpu != -1 && cpu != raw_smp_processor_id())
                        cpu_set(cpu, cpus);
        }
        smp_call_function_mask(cpus, ack_flush, NULL, 1);
}
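
/*
 * Note (descriptive, added for clarity): KVM_REQ_TLB_FLUSH is queued for
 * every vcpu; the empty ack_flush() IPI only serves to kick vcpus that
 * are currently in guest mode out of the guest so they observe the
 * request on their next entry (see the test_and_clear_bit() in
 * __vcpu_run()).
 */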
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
        struct page *page;
        int r;

        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
        vcpu->mmu.root_hpa = INVALID_PAGE;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        if (!irqchip_in_kernel(kvm) || id == 0)
                vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
        else
                vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
        init_waitqueue_head(&vcpu->wq);

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->run = page_address(page);

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail_free_run;
        }
        vcpu->pio_data = page_address(page);

        r = kvm_mmu_create(vcpu);
        if (r < 0)
                goto fail_free_pio_data;

        if (irqchip_in_kernel(kvm)) {
                r = kvm_create_lapic(vcpu);
                if (r < 0)
                        goto fail_mmu_destroy;
        }

        return 0;

fail_mmu_destroy:
        kvm_mmu_destroy(vcpu);
fail_free_pio_data:
        free_page((unsigned long)vcpu->pio_data);
fail_free_run:
        free_page((unsigned long)vcpu->run);
fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_free_lapic(vcpu);
        kvm_mmu_destroy(vcpu);
        free_page((unsigned long)vcpu->pio_data);
        free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
static struct kvm *kvm_create_vm(void)
{
        struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

        if (!kvm)
                return ERR_PTR(-ENOMEM);

        kvm_io_bus_init(&kvm->pio_bus);
        mutex_init(&kvm->lock);
        INIT_LIST_HEAD(&kvm->active_mmu_pages);
        kvm_io_bus_init(&kvm->mmio_bus);
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
        return kvm;
}
/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        if (!dont || free->rmap != dont->rmap)
                vfree(free->rmap);

        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);

        free->npages = 0;
        free->dirty_bitmap = NULL;
        free->rmap = NULL;
}
static void kvm_free_physmem(struct kvm *kvm)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i)
                kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}
static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
                if (vcpu->pio.guest_pages[i]) {
                        kvm_release_page(vcpu->pio.guest_pages[i]);
                        vcpu->pio.guest_pages[i] = NULL;
                }
}
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
        int i;

        /*
         * Unpin any mmu pages first.
         */
        for (i = 0; i < KVM_MAX_VCPUS; ++i)
                if (kvm->vcpus[i])
                        kvm_unload_vcpu_mmu(kvm->vcpus[i]);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
}
static void kvm_destroy_vm(struct kvm *kvm)
{
        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_io_bus_destroy(&kvm->pio_bus);
        kvm_io_bus_destroy(&kvm->mmio_bus);
        kvm_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        kfree(kvm);
}
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        kvm_destroy_vm(kvm);
        return 0;
}
static void inject_gp(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->inject_gp(vcpu, 0);
}
/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
        int ret;
        u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];

        mutex_lock(&vcpu->kvm->lock);
        ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
                                  offset * sizeof(u64), sizeof(pdpte));
        if (ret < 0) {
                ret = 0;
                goto out;
        }
        for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
                if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
                        ret = 0;
                        goto out;
                }
        }
        ret = 1;

        memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
out:
        mutex_unlock(&vcpu->kvm->lock);

        return ret;
}
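
/*
 * Worked example (illustrative): for cr3 = 0x12345020 the pdpt gfn is
 * 0x12345 and offset = ((0x020 >> 5) << 2) = 4, so the read starts
 * 4 * sizeof(u64) = 32 bytes into the page -- matching PAE's 32-byte
 * aligned page-directory-pointer table.
 */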
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        if (cr0 & CR0_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
                       cr0, vcpu->cr0);
                inject_gp(vcpu);
                return;
        }

        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
                printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
                inject_gp(vcpu);
                return;
        }

        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
                printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
                       "and a clear PE flag\n");
                inject_gp(vcpu);
                return;
        }

        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
                if ((vcpu->shadow_efer & EFER_LME)) {
                        int cs_db, cs_l;

                        if (!is_pae(vcpu)) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while PAE is disabled\n");
                                inject_gp(vcpu);
                                return;
                        }
                        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while CS.L == 1\n");
                                inject_gp(vcpu);
                                return;
                        }
                } else
#endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
                        printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
                               "reserved bits\n");
                        inject_gp(vcpu);
                        return;
                }
        }

        kvm_x86_ops->set_cr0(vcpu, cr0);
        vcpu->cr0 = cr0;

        mutex_lock(&vcpu->kvm->lock);
        kvm_mmu_reset_context(vcpu);
        mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr0);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
        set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        if (cr4 & CR4_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
                inject_gp(vcpu);
                return;
        }

        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE)) {
                        printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
                               "in long mode\n");
                        inject_gp(vcpu);
                        return;
                }
        } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
                   && !load_pdptrs(vcpu, vcpu->cr3)) {
                printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                inject_gp(vcpu);
                return;
        }

        if (cr4 & X86_CR4_VMXE) {
                printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
                inject_gp(vcpu);
                return;
        }
        kvm_x86_ops->set_cr4(vcpu, cr4);
        vcpu->cr4 = cr4;
        mutex_lock(&vcpu->kvm->lock);
        kvm_mmu_reset_context(vcpu);
        mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr4);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        if (is_long_mode(vcpu)) {
                if (cr3 & CR3_L_MODE_RESERVED_BITS) {
                        printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                        inject_gp(vcpu);
                        return;
                }
        } else {
                if (is_pae(vcpu)) {
                        if (cr3 & CR3_PAE_RESERVED_BITS) {
                                printk(KERN_DEBUG
                                       "set_cr3: #GP, reserved bits\n");
                                inject_gp(vcpu);
                                return;
                        }
                        if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
                                printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
                                       "reserved bits\n");
                                inject_gp(vcpu);
                                return;
                        }
                }
                /*
                 * We don't check reserved bits in nonpae mode, because
                 * this isn't enforced, and VMware depends on this.
                 */
        }

        mutex_lock(&vcpu->kvm->lock);
        /*
         * Does the new cr3 value map to physical memory? (Note, we
         * catch an invalid cr3 even in real-mode, because it would
         * cause trouble later on when we turn on paging anyway.)
         *
         * A real CPU would silently accept an invalid cr3 and would
         * attempt to use it - with largely undefined (and often hard
         * to debug) behavior on the guest side.
         */
        if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
                inject_gp(vcpu);
        else {
                vcpu->cr3 = cr3;
                vcpu->mmu.new_cr3(vcpu);
        }
        mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr3);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        if (cr8 & CR8_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
                inject_gp(vcpu);
                return;
        }
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_tpr(vcpu, cr8);
        else
                vcpu->cr8 = cr8;
}
EXPORT_SYMBOL_GPL(set_cr8);
unsigned long get_cr8(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm))
                return kvm_lapic_get_cr8(vcpu);
        else
                return vcpu->cr8;
}
EXPORT_SYMBOL_GPL(get_cr8);
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm))
                return vcpu->apic_base;
        else
                return vcpu->apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);
void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
        /* TODO: reserve bits check */
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_base(vcpu, data);
        else
                vcpu->apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);
void fx_init(struct kvm_vcpu *vcpu)
{
        unsigned after_mxcsr_mask;

        /* Initialize guest FPU by resetting ours and saving into guest's */
        preempt_disable();
        fx_save(&vcpu->host_fx_image);
        fpu_init();
        fx_save(&vcpu->guest_fx_image);
        fx_restore(&vcpu->host_fx_image);
        preempt_enable();

        vcpu->cr0 |= X86_CR0_ET;
        after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
        vcpu->guest_fx_image.mxcsr = 0x1f80;
        memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
               0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);
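
/*
 * Note (descriptive, added for clarity): 0x1f80 is the architectural
 * power-on default for MXCSR (all SSE exceptions masked, round to
 * nearest); everything past the mxcsr_mask field is cleared so the guest
 * starts from a pristine fxsave image.
 */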
/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 */
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                          struct
                                          kvm_userspace_memory_region *mem,
                                          int user_alloc)
{
        int r;
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;

        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (mem->slot >= KVM_MEMORY_SLOTS)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;

        memslot = &kvm->memslots[mem->slot];
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;

        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

        mutex_lock(&kvm->lock);

        new = old = *memslot;

        new.base_gfn = base_gfn;
        new.npages = npages;
        new.flags = mem->flags;

        /* Disallow changing a memory slot's size. */
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
                goto out_unlock;

        /* Check for overlaps */
        r = -EEXIST;
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *s = &kvm->memslots[i];

                if (s == memslot)
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
                      (base_gfn >= s->base_gfn + s->npages)))
                        goto out_unlock;
        }

        /* Free page dirty bitmap if unneeded */
        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
                new.dirty_bitmap = NULL;

        r = -ENOMEM;

        /* Allocate if a slot is being created */
        if (npages && !new.rmap) {
                new.rmap = vmalloc(npages * sizeof(struct page *));

                if (!new.rmap)
                        goto out_unlock;

                memset(new.rmap, 0, npages * sizeof(*new.rmap));

                if (user_alloc)
                        new.userspace_addr = mem->userspace_addr;
                else {
                        down_write(&current->mm->mmap_sem);
                        new.userspace_addr = do_mmap(NULL, 0,
                                                     npages * PAGE_SIZE,
                                                     PROT_READ | PROT_WRITE,
                                                     MAP_SHARED | MAP_ANONYMOUS,
                                                     0);
                        up_write(&current->mm->mmap_sem);

                        if (IS_ERR((void *)new.userspace_addr))
                                goto out_unlock;
                }
        }

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
                        goto out_unlock;
                memset(new.dirty_bitmap, 0, dirty_bytes);
        }

        if (mem->slot >= kvm->nmemslots)
                kvm->nmemslots = mem->slot + 1;

        if (!kvm->n_requested_mmu_pages) {
                unsigned int n_pages;

                if (npages) {
                        n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
                        kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
                                                 n_pages);
                } else {
                        unsigned int nr_mmu_pages;

                        n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
                        nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
                        nr_mmu_pages = max(nr_mmu_pages,
                                    (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
                        kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
                }
        }

        *memslot = new;

        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
        kvm_flush_remote_tlbs(kvm);

        mutex_unlock(&kvm->lock);

        kvm_free_physmem_slot(&old, &new);
        return 0;

out_unlock:
        mutex_unlock(&kvm->lock);
        kvm_free_physmem_slot(&new, &old);
out:
        return r;
}
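
/*
 * Illustrative only -- how userspace typically reaches this handler
 * (the mmap() result and fd variables are hypothetical):
 *
 *      struct kvm_userspace_memory_region mem = {
 *              .slot            = 0,
 *              .flags           = 0,
 *              .guest_phys_addr = 0,
 *              .memory_size     = 128 << 20,
 *              .userspace_addr  = (__u64)guest_ram,
 *      };
 *      ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 *
 * The checks above then enforce page alignment, slot bounds, no overlap
 * with other slots, and a fixed size per slot.
 */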
static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
                                         u32 kvm_nr_mmu_pages)
{
        if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
                return -EINVAL;

        mutex_lock(&kvm->lock);

        kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
        kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;

        mutex_unlock(&kvm->lock);
        return 0;
}
static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
        return kvm->n_alloc_mmu_pages;
}
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                      struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        int n;
        unsigned long any = 0;

        mutex_lock(&kvm->lock);

        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = &kvm->memslots[log->slot];
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

        for (i = 0; !any && i < n/sizeof(long); ++i)
                any = memslot->dirty_bitmap[i];

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        /* If nothing is dirty, don't bother messing with page tables. */
        if (any) {
                kvm_mmu_slot_remove_write_access(kvm, log->slot);
                kvm_flush_remote_tlbs(kvm);
                memset(memslot->dirty_bitmap, 0, n);
        }

        r = 0;

out:
        mutex_unlock(&kvm->lock);
        return r;
}
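
/*
 * Bitmap sizing example (illustrative): a 1000-page slot gives
 * n = ALIGN(1000, 64) / 8 = 128 bytes on a 64-bit host -- one bit per
 * guest page, rounded up to a whole number of longs.
 */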
/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
                                         struct kvm_memory_alias *alias)
{
        int r, n;
        struct kvm_mem_alias *p;

        r = -EINVAL;
        /* General sanity checks */
        if (alias->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (alias->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (alias->slot >= KVM_ALIAS_SLOTS)
                goto out;
        if (alias->guest_phys_addr + alias->memory_size
            < alias->guest_phys_addr)
                goto out;
        if (alias->target_phys_addr + alias->memory_size
            < alias->target_phys_addr)
                goto out;

        mutex_lock(&kvm->lock);

        p = &kvm->aliases[alias->slot];
        p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
        p->npages = alias->memory_size >> PAGE_SHIFT;
        p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

        for (n = KVM_ALIAS_SLOTS; n > 0; --n)
                if (kvm->aliases[n - 1].npages)
                        break;
        kvm->naliases = n;

        kvm_mmu_zap_all(kvm);

        mutex_unlock(&kvm->lock);

        return 0;

out:
        return r;
}
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
        int r;

        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_PIC_MASTER:
                memcpy(&chip->chip.pic,
                       &pic_irqchip(kvm)->pics[0],
                       sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_PIC_SLAVE:
                memcpy(&chip->chip.pic,
                       &pic_irqchip(kvm)->pics[1],
                       sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_IOAPIC:
                memcpy(&chip->chip.ioapic,
                       ioapic_irqchip(kvm),
                       sizeof(struct kvm_ioapic_state));
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}
static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
        int r;

        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_PIC_MASTER:
                memcpy(&pic_irqchip(kvm)->pics[0],
                       &chip->chip.pic,
                       sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_PIC_SLAVE:
                memcpy(&pic_irqchip(kvm)->pics[1],
                       &chip->chip.pic,
                       sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_IOAPIC:
                memcpy(ioapic_irqchip(kvm),
                       &chip->chip.ioapic,
                       sizeof(struct kvm_ioapic_state));
                break;
        default:
                r = -EINVAL;
                break;
        }
        kvm_pic_update_irq(pic_irqchip(kvm));
        return r;
}
int is_error_page(struct page *page)
{
        return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;
        struct kvm_mem_alias *alias;

        for (i = 0; i < kvm->naliases; ++i) {
                alias = &kvm->aliases[i];
                if (gfn >= alias->base_gfn
                    && gfn < alias->base_gfn + alias->npages)
                        return alias->target_gfn + gfn - alias->base_gfn;
        }
        return gfn;
}
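
/*
 * Example (illustrative): with an alias { base_gfn = 0xa0, npages = 0x10,
 * target_gfn = 0x100 }, unalias_gfn(kvm, 0xa5) yields 0x105, while a gfn
 * outside every alias window is returned unchanged.
 */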
static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        }
        return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        gfn = unalias_gfn(kvm, gfn);
        return __gfn_to_memslot(kvm, gfn);
}
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;
        struct page *page[1];
        int npages;

        gfn = unalias_gfn(kvm, gfn);
        slot = __gfn_to_memslot(kvm, gfn);
        if (!slot) {
                get_page(bad_page);
                return bad_page;
        }
        down_read(&current->mm->mmap_sem);
        npages = get_user_pages(current, current->mm,
                                slot->userspace_addr
                                + (gfn - slot->base_gfn) * PAGE_SIZE, 1,
                                1, 1, page, NULL);
        up_read(&current->mm->mmap_sem);
        if (npages != 1) {
                get_page(bad_page);
                return bad_page;
        }
        return page[0];
}
EXPORT_SYMBOL_GPL(gfn_to_page);
void kvm_release_page(struct page *page)
{
        if (!PageReserved(page))
                SetPageDirty(page);
        put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page);
static int next_segment(unsigned long len, int offset)
{
        if (len > PAGE_SIZE - offset)
                return PAGE_SIZE - offset;
        else
                return len;
}
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len)
{
        void *page_virt;
        struct page *page;

        page = gfn_to_page(kvm, gfn);
        if (is_error_page(page)) {
                kvm_release_page(page);
                return -EFAULT;
        }
        page_virt = kmap_atomic(page, KM_USER0);

        memcpy(data, page_virt + offset, len);

        kunmap_atomic(page_virt, KM_USER0);
        kvm_release_page(page);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
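
/*
 * Example (illustrative): a 6000-byte read at gpa 0x1f00 is split by
 * next_segment() into 256 bytes (up to the first page boundary), then
 * 4096, then the remaining 1648, with gfn advancing once per iteration.
 */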
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len)
{
        void *page_virt;
        struct page *page;

        page = gfn_to_page(kvm, gfn);
        if (is_error_page(page)) {
                kvm_release_page(page);
                return -EFAULT;
        }
        page_virt = kmap_atomic(page, KM_USER0);

        memcpy(page_virt + offset, data, len);

        kunmap_atomic(page_virt, KM_USER0);
        mark_page_dirty(kvm, gfn);
        kvm_release_page(page);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
        void *page_virt;
        struct page *page;

        page = gfn_to_page(kvm, gfn);
        if (is_error_page(page)) {
                kvm_release_page(page);
                return -EFAULT;
        }
        page_virt = kmap_atomic(page, KM_USER0);

        memset(page_virt + offset, 0, len);

        kunmap_atomic(page_virt, KM_USER0);
        kvm_release_page(page);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
/* WARNING: Does not work on aliased pages. */
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        memslot = __gfn_to_memslot(kvm, gfn);
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;

                /* avoid RMW */
                if (!test_bit(rel_gfn, memslot->dirty_bitmap))
                        set_bit(rel_gfn, memslot->dirty_bitmap);
        }
}
int emulator_read_std(unsigned long addr,
                      void *val,
                      unsigned int bytes,
                      struct kvm_vcpu *vcpu)
{
        void *data = val;

        while (bytes) {
                gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;

                if (gpa == UNMAPPED_GVA)
                        return X86EMUL_PROPAGATE_FAULT;
                ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
                if (ret < 0)
                        return X86EMUL_UNHANDLEABLE;

                bytes -= tocopy;
                data += tocopy;
                addr += tocopy;
        }

        return X86EMUL_CONTINUE;
}
EXPORT_SYMBOL_GPL(emulator_read_std);
static int emulator_write_std(unsigned long addr,
                              const void *val,
                              unsigned int bytes,
                              struct kvm_vcpu *vcpu)
{
        pr_unimpl(vcpu, "emulator_write_std: addr %lx n %d\n", addr, bytes);
        return X86EMUL_UNHANDLEABLE;
}
/*
 * Only the apic needs an MMIO device hook, so shortcut now.
 */
static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
                                                   gpa_t addr)
{
        struct kvm_io_device *dev;

        if (vcpu->apic) {
                dev = &vcpu->apic->dev;
                if (dev->in_range(dev, addr))
                        return dev;
        }
        return NULL;
}

static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
                                                gpa_t addr)
{
        struct kvm_io_device *dev;

        dev = vcpu_find_pervcpu_dev(vcpu, addr);
        if (dev == NULL)
                dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
        return dev;
}

static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
                                               gpa_t addr)
{
        return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
}
static int emulator_read_emulated(unsigned long addr,
                                  void *val,
                                  unsigned int bytes,
                                  struct kvm_vcpu *vcpu)
{
        struct kvm_io_device *mmio_dev;
        gpa_t gpa;

        if (vcpu->mmio_read_completed) {
                memcpy(val, vcpu->mmio_data, bytes);
                vcpu->mmio_read_completed = 0;
                return X86EMUL_CONTINUE;
        } else if (emulator_read_std(addr, val, bytes, vcpu)
                   == X86EMUL_CONTINUE)
                return X86EMUL_CONTINUE;

        gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
        if (gpa == UNMAPPED_GVA)
                return X86EMUL_PROPAGATE_FAULT;

        /*
         * Is this MMIO handled locally?
         */
        mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
        if (mmio_dev) {
                kvm_iodevice_read(mmio_dev, gpa, bytes, val);
                return X86EMUL_CONTINUE;
        }

        vcpu->mmio_needed = 1;
        vcpu->mmio_phys_addr = gpa;
        vcpu->mmio_size = bytes;
        vcpu->mmio_is_write = 0;

        return X86EMUL_UNHANDLEABLE;
}
static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                               const void *val, int bytes)
{
        int ret;

        ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
        if (ret < 0)
                return 0;
        kvm_mmu_pte_write(vcpu, gpa, val, bytes);
        return 1;
}

static int emulator_write_emulated_onepage(unsigned long addr,
                                           const void *val,
                                           unsigned int bytes,
                                           struct kvm_vcpu *vcpu)
{
        struct kvm_io_device *mmio_dev;
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

        if (gpa == UNMAPPED_GVA) {
                kvm_x86_ops->inject_page_fault(vcpu, addr, 2);
                return X86EMUL_PROPAGATE_FAULT;
        }

        if (emulator_write_phys(vcpu, gpa, val, bytes))
                return X86EMUL_CONTINUE;

        /*
         * Is this MMIO handled locally?
         */
        mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
        if (mmio_dev) {
                kvm_iodevice_write(mmio_dev, gpa, bytes, val);
                return X86EMUL_CONTINUE;
        }

        vcpu->mmio_needed = 1;
        vcpu->mmio_phys_addr = gpa;
        vcpu->mmio_size = bytes;
        vcpu->mmio_is_write = 1;
        memcpy(vcpu->mmio_data, val, bytes);

        return X86EMUL_CONTINUE;
}
int emulator_write_emulated(unsigned long addr,
                            const void *val,
                            unsigned int bytes,
                            struct kvm_vcpu *vcpu)
{
        /* Crossing a page boundary? */
        if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
                int rc, now;

                now = -addr & ~PAGE_MASK;
                rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
                addr += now;
                val += now;
                bytes -= now;
        }
        return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);
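
/*
 * Page-split example (illustrative): an 8-byte write at addr 0xffc gives
 * now = -0xffc & ~PAGE_MASK = 4, so 4 bytes go to the first page and the
 * remaining 4 to the next, each through ..._onepage() above.
 */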
static int emulator_cmpxchg_emulated(unsigned long addr,
                                     const void *old,
                                     const void *new,
                                     unsigned int bytes,
                                     struct kvm_vcpu *vcpu)
{
        static int reported;

        if (!reported) {
                reported = 1;
                printk(KERN_WARNING "kvm: emulating exchange as write\n");
        }
        return emulator_write_emulated(addr, new, bytes, vcpu);
}
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
        return kvm_x86_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
        return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS);
        return X86EMUL_CONTINUE;
}
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
        struct kvm_vcpu *vcpu = ctxt->vcpu;

        switch (dr) {
        case 0 ... 3:
                *dest = kvm_x86_ops->get_dr(vcpu, dr);
                return X86EMUL_CONTINUE;
        default:
                pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
                return X86EMUL_UNHANDLEABLE;
        }
}
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
        unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
        int exception;

        kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
        if (exception) {
                /* FIXME: better handling */
                return X86EMUL_UNHANDLEABLE;
        }
        return X86EMUL_CONTINUE;
}
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
        static int reported;
        u8 opcodes[4];
        unsigned long rip = vcpu->rip;
        unsigned long rip_linear;

        rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

        if (reported)
                return;

        emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);

        printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
               context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
        reported = 1;
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
struct x86_emulate_ops emulate_ops = {
        .read_std            = emulator_read_std,
        .write_std           = emulator_write_std,
        .read_emulated       = emulator_read_emulated,
        .write_emulated      = emulator_write_emulated,
        .cmpxchg_emulated    = emulator_cmpxchg_emulated,
};
int emulate_instruction(struct kvm_vcpu *vcpu,
                        struct kvm_run *run,
                        unsigned long cr2,
                        u16 error_code,
                        int no_decode)
{
        int r;

        vcpu->mmio_fault_cr2 = cr2;
        kvm_x86_ops->cache_regs(vcpu);

        vcpu->mmio_is_write = 0;
        vcpu->pio.string = 0;

        if (!no_decode) {
                int cs_db, cs_l;

                kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

                vcpu->emulate_ctxt.vcpu = vcpu;
                vcpu->emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
                vcpu->emulate_ctxt.cr2 = cr2;
                vcpu->emulate_ctxt.mode =
                        (vcpu->emulate_ctxt.eflags & X86_EFLAGS_VM)
                        ? X86EMUL_MODE_REAL : cs_l
                        ? X86EMUL_MODE_PROT64 : cs_db
                        ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

                if (vcpu->emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
                        vcpu->emulate_ctxt.cs_base = 0;
                        vcpu->emulate_ctxt.ds_base = 0;
                        vcpu->emulate_ctxt.es_base = 0;
                        vcpu->emulate_ctxt.ss_base = 0;
                } else {
                        vcpu->emulate_ctxt.cs_base =
                                        get_segment_base(vcpu, VCPU_SREG_CS);
                        vcpu->emulate_ctxt.ds_base =
                                        get_segment_base(vcpu, VCPU_SREG_DS);
                        vcpu->emulate_ctxt.es_base =
                                        get_segment_base(vcpu, VCPU_SREG_ES);
                        vcpu->emulate_ctxt.ss_base =
                                        get_segment_base(vcpu, VCPU_SREG_SS);
                }

                vcpu->emulate_ctxt.gs_base =
                                get_segment_base(vcpu, VCPU_SREG_GS);
                vcpu->emulate_ctxt.fs_base =
                                get_segment_base(vcpu, VCPU_SREG_FS);

                r = x86_decode_insn(&vcpu->emulate_ctxt, &emulate_ops);
                if (r) {
                        if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
                                return EMULATE_DONE;
                        return EMULATE_FAIL;
                }
        }

        r = x86_emulate_insn(&vcpu->emulate_ctxt, &emulate_ops);

        if (vcpu->pio.string)
                return EMULATE_DO_MMIO;

        if ((r || vcpu->mmio_is_write) && run) {
                run->exit_reason = KVM_EXIT_MMIO;
                run->mmio.phys_addr = vcpu->mmio_phys_addr;
                memcpy(run->mmio.data, vcpu->mmio_data, 8);
                run->mmio.len = vcpu->mmio_size;
                run->mmio.is_write = vcpu->mmio_is_write;
        }

        if (r) {
                if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
                        return EMULATE_DONE;
                if (!vcpu->mmio_needed) {
                        kvm_report_emulation_failure(vcpu, "mmio");
                        return EMULATE_FAIL;
                }
                return EMULATE_DO_MMIO;
        }

        kvm_x86_ops->decache_regs(vcpu);
        kvm_x86_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags);

        if (vcpu->mmio_is_write) {
                vcpu->mmio_needed = 0;
                return EMULATE_DO_MMIO;
        }

        return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);
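
/*
 * Mode-selection recap (descriptive): EFLAGS.VM forces X86EMUL_MODE_REAL
 * regardless of the CS bits; otherwise CS.L selects 64-bit and CS.D picks
 * between 32-bit and 16-bit protected mode -- e.g. a vm86 guest decodes
 * as real mode even if cs_l happens to be set.
 */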
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
static void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&vcpu->wq, &wait);

        /*
         * We will block until either an interrupt or a signal wakes us up
         */
        while (!kvm_cpu_has_interrupt(vcpu)
               && !signal_pending(current)
               && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
               && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
                set_current_state(TASK_INTERRUPTIBLE);
                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
        }

        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&vcpu->wq, &wait);
}
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
        ++vcpu->stat.halt_exits;
        if (irqchip_in_kernel(vcpu->kvm)) {
                vcpu->mp_state = VCPU_MP_STATE_HALTED;
                kvm_vcpu_block(vcpu);
                if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
                        return -EINTR;
                return 1;
        } else {
                vcpu->run->exit_reason = KVM_EXIT_HLT;
                return 0;
        }
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
        unsigned long nr, a0, a1, a2, a3, ret;

        kvm_x86_ops->cache_regs(vcpu);

        nr = vcpu->regs[VCPU_REGS_RAX];
        a0 = vcpu->regs[VCPU_REGS_RBX];
        a1 = vcpu->regs[VCPU_REGS_RCX];
        a2 = vcpu->regs[VCPU_REGS_RDX];
        a3 = vcpu->regs[VCPU_REGS_RSI];

        if (!is_long_mode(vcpu)) {
                nr &= 0xFFFFFFFF;
                a0 &= 0xFFFFFFFF;
                a1 &= 0xFFFFFFFF;
                a2 &= 0xFFFFFFFF;
                a3 &= 0xFFFFFFFF;
        }

        switch (nr) {
        default:
                ret = -KVM_ENOSYS;
                break;
        }
        vcpu->regs[VCPU_REGS_RAX] = ret;
        kvm_x86_ops->decache_regs(vcpu);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
        char instruction[3];
        int ret = 0;

        mutex_lock(&vcpu->kvm->lock);

        /*
         * Blow out the MMU to ensure that no other VCPU has an active mapping
         * to ensure that the updated hypercall appears atomically across all
         * VCPUs.
         */
        kvm_mmu_zap_all(vcpu->kvm);

        kvm_x86_ops->cache_regs(vcpu);
        kvm_x86_ops->patch_hypercall(vcpu, instruction);
        if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
            != X86EMUL_CONTINUE)
                ret = -EFAULT;

        mutex_unlock(&vcpu->kvm->lock);

        return ret;
}
static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
        return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}
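
/*
 * Example (illustrative): mk_cr_64(0x123456789abcdef0, 0x80000011)
 * == 0x1234567880000011 -- the upper 32 bits of the old control register
 * are preserved and only the low half is replaced.
 */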
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
        struct descriptor_table dt = { limit, base };

        kvm_x86_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
        struct descriptor_table dt = { limit, base };

        kvm_x86_ops->set_idt(vcpu, &dt);
}
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
                   unsigned long *rflags)
{
        lmsw(vcpu, msw);
        *rflags = kvm_x86_ops->get_rflags(vcpu);
}
unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
        kvm_x86_ops->decache_cr4_guest_bits(vcpu);
        switch (cr) {
        case 0:
                return vcpu->cr0;
        case 2:
                return vcpu->cr2;
        case 3:
                return vcpu->cr3;
        case 4:
                return vcpu->cr4;
        default:
                vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
                return 0;
        }
}
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
                     unsigned long *rflags)
{
        switch (cr) {
        case 0:
                set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
                *rflags = kvm_x86_ops->get_rflags(vcpu);
                break;
        case 2:
                vcpu->cr2 = val;
                break;
        case 3:
                set_cr3(vcpu, val);
                break;
        case 4:
                set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
                break;
        default:
                vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
        }
}
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data;

        switch (msr) {
        case 0xc0010010: /* SYSCFG */
        case 0xc0010015: /* HWCR */
        case MSR_IA32_PLATFORM_ID:
        case MSR_IA32_P5_MC_ADDR:
        case MSR_IA32_P5_MC_TYPE:
        case MSR_IA32_MC0_CTL:
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MCG_CAP:
        case MSR_IA32_MC0_MISC:
        case MSR_IA32_MC0_MISC+4:
        case MSR_IA32_MC0_MISC+8:
        case MSR_IA32_MC0_MISC+12:
        case MSR_IA32_MC0_MISC+16:
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_PERF_STATUS:
        case MSR_IA32_EBL_CR_POWERON:
                /* MTRR registers */
        case 0x200 ... 0x2ff:
                data = 0;
                break;
        case 0xcd: /* fsb frequency */
                data = 3;
                break;
        case MSR_IA32_APICBASE:
                data = kvm_get_apic_base(vcpu);
                break;
        case MSR_IA32_MISC_ENABLE:
                data = vcpu->ia32_misc_enable_msr;
                break;
#ifdef CONFIG_X86_64
        case MSR_EFER:
                data = vcpu->shadow_efer;
                break;
#endif
        default:
                pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }
        *pdata = data;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
        return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}
#ifdef CONFIG_X86_64

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        if (efer & EFER_RESERVED_BITS) {
                printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
                       efer);
                inject_gp(vcpu);
                return;
        }

        if (is_paging(vcpu)
            && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
                printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                inject_gp(vcpu);
                return;
        }

        kvm_x86_ops->set_efer(vcpu, efer);

        efer &= ~EFER_LMA;
        efer |= vcpu->shadow_efer & EFER_LMA;

        vcpu->shadow_efer = efer;
}

#endif
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        switch (msr) {
#ifdef CONFIG_X86_64
        case MSR_EFER:
                set_efer(vcpu, data);
                break;
#endif
        case MSR_IA32_MC0_STATUS:
                pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
                          __FUNCTION__, data);
                break;
        case MSR_IA32_MCG_STATUS:
                pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
                          __FUNCTION__, data);
                break;
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_UCODE_WRITE:
        case 0x200 ... 0x2ff: /* MTRRs */
                break;
        case MSR_IA32_APICBASE:
                kvm_set_apic_base(vcpu, data);
                break;
        case MSR_IA32_MISC_ENABLE:
                vcpu->ia32_misc_enable_msr = data;
                break;
        default:
                pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}
void kvm_resched(struct kvm_vcpu *vcpu)
{
        if (!need_resched())
                return;
        cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
        int i;
        u32 function;
        struct kvm_cpuid_entry *e, *best;

        kvm_x86_ops->cache_regs(vcpu);
        function = vcpu->regs[VCPU_REGS_RAX];
        vcpu->regs[VCPU_REGS_RAX] = 0;
        vcpu->regs[VCPU_REGS_RBX] = 0;
        vcpu->regs[VCPU_REGS_RCX] = 0;
        vcpu->regs[VCPU_REGS_RDX] = 0;
        best = NULL;
        for (i = 0; i < vcpu->cpuid_nent; ++i) {
                e = &vcpu->cpuid_entries[i];
                if (e->function == function) {
                        best = e;
                        break;
                }
                /*
                 * Both basic or both extended?
                 */
                if (((e->function ^ function) & 0x80000000) == 0)
                        if (!best || e->function > best->function)
                                best = e;
        }
        if (best) {
                vcpu->regs[VCPU_REGS_RAX] = best->eax;
                vcpu->regs[VCPU_REGS_RBX] = best->ebx;
                vcpu->regs[VCPU_REGS_RCX] = best->ecx;
                vcpu->regs[VCPU_REGS_RDX] = best->edx;
        }
        kvm_x86_ops->decache_regs(vcpu);
        kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
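
/*
 * Fallback example (illustrative): a request for leaf 0x80000005 when
 * only 0x80000000 and 0x80000008 are present matches neither exactly;
 * the "both basic or both extended" test then keeps the highest
 * same-class entry (0x80000008), and a basic leaf can never fall back to
 * an extended one because the XOR of the two functions has bit 31 set.
 */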
static int pio_copy_data(struct kvm_vcpu *vcpu)
{
        void *p = vcpu->pio_data;
        void *q;
        unsigned bytes;
        int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;

        q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
                 PAGE_KERNEL);
        if (!q) {
                free_pio_guest_pages(vcpu);
                return -ENOMEM;
        }
        q += vcpu->pio.guest_page_offset;
        bytes = vcpu->pio.size * vcpu->pio.cur_count;
        if (vcpu->pio.in)
                memcpy(q, p, bytes);
        else
                memcpy(p, q, bytes);
        q -= vcpu->pio.guest_page_offset;
        vunmap(q);
        free_pio_guest_pages(vcpu);
        return 0;
}

static int complete_pio(struct kvm_vcpu *vcpu)
{
        struct kvm_pio_request *io = &vcpu->pio;
        long delta;
        int r;

        kvm_x86_ops->cache_regs(vcpu);

        if (!io->string) {
                if (io->in)
                        memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
                               io->size);
        } else {
                if (io->in) {
                        r = pio_copy_data(vcpu);
                        if (r) {
                                kvm_x86_ops->cache_regs(vcpu);
                                return r;
                        }
                }

                delta = 1;
                if (io->rep) {
                        delta *= io->cur_count;
                        /*
                         * The size of the register should really depend on
                         * current address size.
                         */
                        vcpu->regs[VCPU_REGS_RCX] -= delta;
                }
                if (io->down)
                        delta = -delta;
                delta *= io->size;
                if (io->in)
                        vcpu->regs[VCPU_REGS_RDI] += delta;
                else
                        vcpu->regs[VCPU_REGS_RSI] += delta;
        }

        kvm_x86_ops->decache_regs(vcpu);

        io->count -= io->cur_count;
        io->cur_count = 0;

        return 0;
}

static void kernel_pio(struct kvm_io_device *pio_dev,
                       struct kvm_vcpu *vcpu,
                       void *pd)
{
        /* TODO: String I/O for in kernel device */

        mutex_lock(&vcpu->kvm->lock);
        if (vcpu->pio.in)
                kvm_iodevice_read(pio_dev, vcpu->pio.port,
                                  vcpu->pio.size,
                                  pd);
        else
                kvm_iodevice_write(pio_dev, vcpu->pio.port,
                                   vcpu->pio.size,
                                   pd);
        mutex_unlock(&vcpu->kvm->lock);
}

static void pio_string_write(struct kvm_io_device *pio_dev,
                             struct kvm_vcpu *vcpu)
{
        struct kvm_pio_request *io = &vcpu->pio;
        void *pd = vcpu->pio_data;
        int i;

        mutex_lock(&vcpu->kvm->lock);
        for (i = 0; i < io->cur_count; i++) {
                kvm_iodevice_write(pio_dev, io->port,
                                   io->size,
                                   pd);
                pd += io->size;
        }
        mutex_unlock(&vcpu->kvm->lock);
}
int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                    int size, unsigned port)
{
        struct kvm_io_device *pio_dev;

        vcpu->run->exit_reason = KVM_EXIT_IO;
        vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
        vcpu->run->io.size = vcpu->pio.size = size;
        vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
        vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = 1;
        vcpu->run->io.port = vcpu->pio.port = port;
        vcpu->pio.in = in;
        vcpu->pio.string = 0;
        vcpu->pio.down = 0;
        vcpu->pio.guest_page_offset = 0;
        vcpu->pio.rep = 0;

        kvm_x86_ops->cache_regs(vcpu);
        memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
        kvm_x86_ops->decache_regs(vcpu);

        kvm_x86_ops->skip_emulated_instruction(vcpu);

        pio_dev = vcpu_find_pio_dev(vcpu, port);
        if (pio_dev) {
                kernel_pio(pio_dev, vcpu, vcpu->pio_data);
                complete_pio(vcpu);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                           int size, unsigned long count, int down,
                           gva_t address, int rep, unsigned port)
{
        unsigned now, in_page;
        int i, ret = 0;
        int nr_pages = 1;
        struct page *page;
        struct kvm_io_device *pio_dev;

        vcpu->run->exit_reason = KVM_EXIT_IO;
        vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
        vcpu->run->io.size = vcpu->pio.size = size;
        vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
        vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = count;
        vcpu->run->io.port = vcpu->pio.port = port;
        vcpu->pio.in = in;
        vcpu->pio.string = 1;
        vcpu->pio.down = down;
        vcpu->pio.guest_page_offset = offset_in_page(address);
        vcpu->pio.rep = rep;

        if (!count) {
                kvm_x86_ops->skip_emulated_instruction(vcpu);
                return 1;
        }

        if (!down)
                in_page = PAGE_SIZE - offset_in_page(address);
        else
                in_page = offset_in_page(address) + size;
        now = min(count, (unsigned long)in_page / size);
        if (!now) {
                /*
                 * String I/O straddles page boundary.  Pin two guest pages
                 * so that we satisfy atomicity constraints.  Do just one
                 * transaction to avoid complexity.
                 */
                nr_pages = 2;
                now = 1;
        }
        if (down) {
                /*
                 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
                 */
                pr_unimpl(vcpu, "guest string pio down\n");
                inject_gp(vcpu);
                return 1;
        }
        vcpu->run->io.count = now;
        vcpu->pio.cur_count = now;

        if (vcpu->pio.cur_count == vcpu->pio.count)
                kvm_x86_ops->skip_emulated_instruction(vcpu);

        for (i = 0; i < nr_pages; ++i) {
                mutex_lock(&vcpu->kvm->lock);
                page = gva_to_page(vcpu, address + i * PAGE_SIZE);
                vcpu->pio.guest_pages[i] = page;
                mutex_unlock(&vcpu->kvm->lock);
                if (!page) {
                        inject_gp(vcpu);
                        free_pio_guest_pages(vcpu);
                        return 1;
                }
        }

        pio_dev = vcpu_find_pio_dev(vcpu, port);
        if (!vcpu->pio.in) {
                /* string PIO write */
                ret = pio_copy_data(vcpu);
                if (ret >= 0 && pio_dev) {
                        pio_string_write(pio_dev, vcpu);
                        complete_pio(vcpu);
                        if (vcpu->pio.count == 0)
                                ret = 1;
                }
        } else if (pio_dev)
                pr_unimpl(vcpu, "no string pio read support yet, "
                          "port %x size %d count %ld\n",
                          port, size, count);

        return ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
                                        struct kvm_run *kvm_run)
{
        return (!vcpu->irq_summary &&
                kvm_run->request_interrupt_window &&
                vcpu->interrupt_window_open &&
                (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
}
static void post_kvm_run_save(struct kvm_vcpu *vcpu,
                              struct kvm_run *kvm_run)
{
        kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
        kvm_run->cr8 = get_cr8(vcpu);
        kvm_run->apic_base = kvm_get_apic_base(vcpu);
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_run->ready_for_interrupt_injection = 1;
        else
                kvm_run->ready_for_interrupt_injection =
                                        (vcpu->interrupt_window_open &&
                                         vcpu->irq_summary == 0);
}
*vcpu
, struct kvm_run
*kvm_run
)
2121 if (unlikely(vcpu
->mp_state
== VCPU_MP_STATE_SIPI_RECEIVED
)) {
2122 pr_debug("vcpu %d received sipi with vector # %x\n",
2123 vcpu
->vcpu_id
, vcpu
->sipi_vector
);
2124 kvm_lapic_reset(vcpu
);
2125 kvm_x86_ops
->vcpu_reset(vcpu
);
2126 vcpu
->mp_state
= VCPU_MP_STATE_RUNNABLE
;
2130 if (vcpu
->guest_debug
.enabled
)
2131 kvm_x86_ops
->guest_debug_pre(vcpu
);
2134 r
= kvm_mmu_reload(vcpu
);
2138 kvm_inject_pending_timer_irqs(vcpu
);
2142 kvm_x86_ops
->prepare_guest_switch(vcpu
);
2143 kvm_load_guest_fpu(vcpu
);
2145 local_irq_disable();
2147 if (signal_pending(current
)) {
2151 kvm_run
->exit_reason
= KVM_EXIT_INTR
;
2152 ++vcpu
->stat
.signal_exits
;
2156 if (irqchip_in_kernel(vcpu
->kvm
))
2157 kvm_x86_ops
->inject_pending_irq(vcpu
);
2158 else if (!vcpu
->mmio_read_completed
)
2159 kvm_x86_ops
->inject_pending_vectors(vcpu
, kvm_run
);
2161 vcpu
->guest_mode
= 1;
2165 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH
, &vcpu
->requests
))
2166 kvm_x86_ops
->tlb_flush(vcpu
);
2168 kvm_x86_ops
->run(vcpu
, kvm_run
);
2170 vcpu
->guest_mode
= 0;
2176 * We must have an instruction between local_irq_enable() and
2177 * kvm_guest_exit(), so the timer interrupt isn't delayed by
2178 * the interrupt shadow. The stat.exits increment will do nicely.
2179 * But we need to prevent reordering, hence this barrier():
2188 * Profile KVM exit RIPs:
2190 if (unlikely(prof_on
== KVM_PROFILING
)) {
2191 kvm_x86_ops
->cache_regs(vcpu
);
2192 profile_hit(KVM_PROFILING
, (void *)vcpu
->rip
);
2195 r
= kvm_x86_ops
->handle_exit(kvm_run
, vcpu
);
2198 if (dm_request_for_irq_injection(vcpu
, kvm_run
)) {
2200 kvm_run
->exit_reason
= KVM_EXIT_INTR
;
2201 ++vcpu
->stat
.request_irq_exits
;
2204 if (!need_resched()) {
2205 ++vcpu
->stat
.light_exits
;
2216 post_kvm_run_save(vcpu
, kvm_run
);
2222 static int kvm_vcpu_ioctl_run(struct kvm_vcpu
*vcpu
, struct kvm_run
*kvm_run
)
2229 if (unlikely(vcpu
->mp_state
== VCPU_MP_STATE_UNINITIALIZED
)) {
2230 kvm_vcpu_block(vcpu
);
2235 if (vcpu
->sigset_active
)
2236 sigprocmask(SIG_SETMASK
, &vcpu
->sigset
, &sigsaved
);
2238 /* re-sync apic's tpr */
2239 if (!irqchip_in_kernel(vcpu
->kvm
))
2240 set_cr8(vcpu
, kvm_run
->cr8
);
2242 if (vcpu
->pio
.cur_count
) {
2243 r
= complete_pio(vcpu
);
2248 if (vcpu
->mmio_needed
) {
2249 memcpy(vcpu
->mmio_data
, kvm_run
->mmio
.data
, 8);
2250 vcpu
->mmio_read_completed
= 1;
2251 vcpu
->mmio_needed
= 0;
2252 r
= emulate_instruction(vcpu
, kvm_run
,
2253 vcpu
->mmio_fault_cr2
, 0, 1);
2254 if (r
== EMULATE_DO_MMIO
) {
2256 * Read-modify-write. Back to userspace.
2263 if (kvm_run
->exit_reason
== KVM_EXIT_HYPERCALL
) {
2264 kvm_x86_ops
->cache_regs(vcpu
);
2265 vcpu
->regs
[VCPU_REGS_RAX
] = kvm_run
->hypercall
.ret
;
2266 kvm_x86_ops
->decache_regs(vcpu
);
2269 r
= __vcpu_run(vcpu
, kvm_run
);
2272 if (vcpu
->sigset_active
)
2273 sigprocmask(SIG_SETMASK
, &sigsaved
, NULL
);
2279 static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu
*vcpu
,
2280 struct kvm_regs
*regs
)
2284 kvm_x86_ops
->cache_regs(vcpu
);
2286 regs
->rax
= vcpu
->regs
[VCPU_REGS_RAX
];
2287 regs
->rbx
= vcpu
->regs
[VCPU_REGS_RBX
];
2288 regs
->rcx
= vcpu
->regs
[VCPU_REGS_RCX
];
2289 regs
->rdx
= vcpu
->regs
[VCPU_REGS_RDX
];
2290 regs
->rsi
= vcpu
->regs
[VCPU_REGS_RSI
];
2291 regs
->rdi
= vcpu
->regs
[VCPU_REGS_RDI
];
2292 regs
->rsp
= vcpu
->regs
[VCPU_REGS_RSP
];
2293 regs
->rbp
= vcpu
->regs
[VCPU_REGS_RBP
];
2294 #ifdef CONFIG_X86_64
2295 regs
->r8
= vcpu
->regs
[VCPU_REGS_R8
];
2296 regs
->r9
= vcpu
->regs
[VCPU_REGS_R9
];
2297 regs
->r10
= vcpu
->regs
[VCPU_REGS_R10
];
2298 regs
->r11
= vcpu
->regs
[VCPU_REGS_R11
];
2299 regs
->r12
= vcpu
->regs
[VCPU_REGS_R12
];
2300 regs
->r13
= vcpu
->regs
[VCPU_REGS_R13
];
2301 regs
->r14
= vcpu
->regs
[VCPU_REGS_R14
];
2302 regs
->r15
= vcpu
->regs
[VCPU_REGS_R15
];
2305 regs
->rip
= vcpu
->rip
;
2306 regs
->rflags
= kvm_x86_ops
->get_rflags(vcpu
);
2309 * Don't leak debug flags in case they were set for guest debugging
2311 if (vcpu
->guest_debug
.enabled
&& vcpu
->guest_debug
.singlestep
)
2312 regs
->rflags
&= ~(X86_EFLAGS_TF
| X86_EFLAGS_RF
);
2319 static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu
*vcpu
,
2320 struct kvm_regs
*regs
)
2324 vcpu
->regs
[VCPU_REGS_RAX
] = regs
->rax
;
2325 vcpu
->regs
[VCPU_REGS_RBX
] = regs
->rbx
;
2326 vcpu
->regs
[VCPU_REGS_RCX
] = regs
->rcx
;
2327 vcpu
->regs
[VCPU_REGS_RDX
] = regs
->rdx
;
2328 vcpu
->regs
[VCPU_REGS_RSI
] = regs
->rsi
;
2329 vcpu
->regs
[VCPU_REGS_RDI
] = regs
->rdi
;
2330 vcpu
->regs
[VCPU_REGS_RSP
] = regs
->rsp
;
2331 vcpu
->regs
[VCPU_REGS_RBP
] = regs
->rbp
;
2332 #ifdef CONFIG_X86_64
2333 vcpu
->regs
[VCPU_REGS_R8
] = regs
->r8
;
2334 vcpu
->regs
[VCPU_REGS_R9
] = regs
->r9
;
2335 vcpu
->regs
[VCPU_REGS_R10
] = regs
->r10
;
2336 vcpu
->regs
[VCPU_REGS_R11
] = regs
->r11
;
2337 vcpu
->regs
[VCPU_REGS_R12
] = regs
->r12
;
2338 vcpu
->regs
[VCPU_REGS_R13
] = regs
->r13
;
2339 vcpu
->regs
[VCPU_REGS_R14
] = regs
->r14
;
2340 vcpu
->regs
[VCPU_REGS_R15
] = regs
->r15
;
2343 vcpu
->rip
= regs
->rip
;
2344 kvm_x86_ops
->set_rflags(vcpu
, regs
->rflags
);
2346 kvm_x86_ops
->decache_regs(vcpu
);
static void get_segment(struct kvm_vcpu *vcpu,
                        struct kvm_segment *var, int seg)
{
        return kvm_x86_ops->get_segment(vcpu, var, seg);
}
static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                    struct kvm_sregs *sregs)
{
        struct descriptor_table dt;
        int pending_vec;

        vcpu_load(vcpu);

        get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
        get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
        get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
        get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
        get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
        get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

        get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
        get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

        kvm_x86_ops->get_idt(vcpu, &dt);
        sregs->idt.limit = dt.limit;
        sregs->idt.base = dt.base;
        kvm_x86_ops->get_gdt(vcpu, &dt);
        sregs->gdt.limit = dt.limit;
        sregs->gdt.base = dt.base;

        kvm_x86_ops->decache_cr4_guest_bits(vcpu);
        sregs->cr0 = vcpu->cr0;
        sregs->cr2 = vcpu->cr2;
        sregs->cr3 = vcpu->cr3;
        sregs->cr4 = vcpu->cr4;
        sregs->cr8 = get_cr8(vcpu);
        sregs->efer = vcpu->shadow_efer;
        sregs->apic_base = kvm_get_apic_base(vcpu);

        if (irqchip_in_kernel(vcpu->kvm)) {
                memset(sregs->interrupt_bitmap, 0,
                       sizeof sregs->interrupt_bitmap);
                pending_vec = kvm_x86_ops->get_irq(vcpu);
                if (pending_vec >= 0)
                        set_bit(pending_vec,
                                (unsigned long *)sregs->interrupt_bitmap);
        } else
                memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
                       sizeof sregs->interrupt_bitmap);

        vcpu_put(vcpu);

        return 0;
}
static void set_segment(struct kvm_vcpu *vcpu,
                        struct kvm_segment *var, int seg)
{
        return kvm_x86_ops->set_segment(vcpu, var, seg);
}
static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                    struct kvm_sregs *sregs)
{
        int mmu_reset_needed = 0;
        int i, pending_vec, max_bits;
        struct descriptor_table dt;

        vcpu_load(vcpu);

        dt.limit = sregs->idt.limit;
        dt.base = sregs->idt.base;
        kvm_x86_ops->set_idt(vcpu, &dt);
        dt.limit = sregs->gdt.limit;
        dt.base = sregs->gdt.base;
        kvm_x86_ops->set_gdt(vcpu, &dt);

        vcpu->cr2 = sregs->cr2;
        mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
        vcpu->cr3 = sregs->cr3;

        set_cr8(vcpu, sregs->cr8);

        mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
#ifdef CONFIG_X86_64
        kvm_x86_ops->set_efer(vcpu, sregs->efer);
#endif
        kvm_set_apic_base(vcpu, sregs->apic_base);

        kvm_x86_ops->decache_cr4_guest_bits(vcpu);

        mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
        vcpu->cr0 = sregs->cr0;
        kvm_x86_ops->set_cr0(vcpu, sregs->cr0);

        mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
        kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
        if (!is_long_mode(vcpu) && is_pae(vcpu))
                load_pdptrs(vcpu, vcpu->cr3);

        if (mmu_reset_needed)
                kvm_mmu_reset_context(vcpu);

        if (!irqchip_in_kernel(vcpu->kvm)) {
                memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
                       sizeof vcpu->irq_pending);
                vcpu->irq_summary = 0;
                for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
                        if (vcpu->irq_pending[i])
                                __set_bit(i, &vcpu->irq_summary);
        } else {
                max_bits = (sizeof sregs->interrupt_bitmap) << 3;
                pending_vec = find_first_bit(
                        (const unsigned long *)sregs->interrupt_bitmap,
                        max_bits);
                /* Only pending external irq is handled here */
                if (pending_vec < max_bits) {
                        kvm_x86_ops->set_irq(vcpu, pending_vec);
                        pr_debug("Set back pending irq %d\n",
                                 pending_vec);
                }
        }

        set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
        set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
        set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
        set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
        set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
        set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

        set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
        set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

        vcpu_put(vcpu);

        return 0;
}
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
        struct kvm_segment cs;

        get_segment(vcpu, &cs, VCPU_SREG_CS);
        *db = cs.db;
        *l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
/*
 * Translate a guest virtual address to a guest physical address.
 */
static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                    struct kvm_translation *tr)
{
        unsigned long vaddr = tr->linear_address;
        gpa_t gpa;

        vcpu_load(vcpu);
        mutex_lock(&vcpu->kvm->lock);
        gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
        tr->physical_address = gpa;
        tr->valid = gpa != UNMAPPED_GVA;
        tr->writeable = 1;
        tr->usermode = 0;
        mutex_unlock(&vcpu->kvm->lock);
        vcpu_put(vcpu);

        return 0;
}
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
                                    struct kvm_interrupt *irq)
{
        if (irq->irq < 0 || irq->irq >= 256)
                return -EINVAL;
        if (irqchip_in_kernel(vcpu->kvm))
                return -ENXIO;
        vcpu_load(vcpu);

        set_bit(irq->irq, vcpu->irq_pending);
        set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);

        vcpu_put(vcpu);

        return 0;
}
static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
                                      struct kvm_debug_guest *dbg)
{
        int r;

        vcpu_load(vcpu);

        r = kvm_x86_ops->set_guest_debug(vcpu, dbg);

        vcpu_put(vcpu);

        return r;
}
*kvm_vcpu_nopage(struct vm_area_struct
*vma
,
2556 unsigned long address
,
2559 struct kvm_vcpu
*vcpu
= vma
->vm_file
->private_data
;
2560 unsigned long pgoff
;
2563 pgoff
= ((address
- vma
->vm_start
) >> PAGE_SHIFT
) + vma
->vm_pgoff
;
2565 page
= virt_to_page(vcpu
->run
);
2566 else if (pgoff
== KVM_PIO_PAGE_OFFSET
)
2567 page
= virt_to_page(vcpu
->pio_data
);
2569 return NOPAGE_SIGBUS
;
2572 *type
= VM_FAULT_MINOR
;
2577 static struct vm_operations_struct kvm_vcpu_vm_ops
= {
2578 .nopage
= kvm_vcpu_nopage
,
static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vcpu_vm_ops;
        return 0;
}
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
        struct kvm_vcpu *vcpu = filp->private_data;

        fput(vcpu->kvm->filp);
        return 0;
}
static struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .compat_ioctl   = kvm_vcpu_ioctl,
        .mmap           = kvm_vcpu_mmap,
};
/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
        int fd, r;
        struct inode *inode;
        struct file *file;

        r = anon_inode_getfd(&fd, &inode, &file,
                             "kvm-vcpu", &kvm_vcpu_fops, vcpu);
        if (r)
                return r;
        atomic_inc(&vcpu->kvm->filp->f_count);
        return fd;
}
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
        int r;
        struct kvm_vcpu *vcpu;

        if (!valid_vcpu(n))
                return -EINVAL;

        vcpu = kvm_x86_ops->vcpu_create(kvm, n);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);

        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

        /* We do fxsave: this must be aligned. */
        BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);

        vcpu_load(vcpu);
        r = kvm_mmu_setup(vcpu);
        vcpu_put(vcpu);
        if (r < 0)
                goto free_vcpu;

        mutex_lock(&kvm->lock);
        if (kvm->vcpus[n]) {
                r = -EEXIST;
                mutex_unlock(&kvm->lock);
                goto mmu_unload;
        }
        kvm->vcpus[n] = vcpu;
        mutex_unlock(&kvm->lock);

        /* Now it's all set up, let userspace reach it */
        r = create_vcpu_fd(vcpu);
        if (r < 0)
                goto unlink;
        return r;

unlink:
        mutex_lock(&kvm->lock);
        kvm->vcpus[n] = NULL;
        mutex_unlock(&kvm->lock);

mmu_unload:
        vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);

free_vcpu:
        kvm_x86_ops->vcpu_free(vcpu);
        return r;
}
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
        if (sigset) {
                sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
                vcpu->sigset_active = 1;
                vcpu->sigset = *sigset;
        } else
                vcpu->sigset_active = 0;
        return 0;
}
/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
        u16     cwd;
        u16     swd;
        u16     twd;
        u16     fop;
        u64     rip;
        u64     rdp;
        u32     mxcsr;
        u32     mxcsr_mask;
        u32     st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
        u32     xmm_space[64];  /* 16*16 bytes for each XMM-reg = 256 bytes */
#else
        u32     xmm_space[32];  /* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};
static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		memset(&kvm_regs, 0, sizeof kvm_regs);
		r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* Pass p, not &sigset: a NULL argp must clear the mask. */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

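/*
 * Example (illustrative only): the typical userspace vcpu loop built on the
 * ioctls above.  A minimal sketch; real VMMs handle many more exit reasons,
 * and handle_io() is a hypothetical helper:
 *
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_IO:
 *			handle_io(run);
 *			break;
 *		case KVM_EXIT_HLT:
 *			return;
 *		}
 *	}
 *
 * run is the mmaped struct kvm_run described at kvm_vcpu_nopage() above.
 */
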
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
		if (r)
			goto out;
		break;
	case KVM_GET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
		break;
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_MEMORY_ALIAS: {
		struct kvm_memory_alias alias;

		r = -EFAULT;
		if (copy_from_user(&alias, argp, sizeof alias))
			goto out;
		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
		if (r)
			goto out;
		break;
	}
	case KVM_CREATE_IRQCHIP:
		r = -ENOMEM;
		kvm->vpic = kvm_create_pic(kvm);
		if (kvm->vpic) {
			r = kvm_ioapic_init(kvm);
			if (r) {
				kfree(kvm->vpic);
				kvm->vpic = NULL;
				goto out;
			}
		} else
			goto out;
		break;
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			mutex_lock(&kvm->lock);
			if (irq_event.irq < 16)
				kvm_pic_set_irq(pic_irqchip(kvm),
						irq_event.irq,
						irq_event.level);
			kvm_ioapic_set_irq(kvm->vioapic,
					   irq_event.irq,
					   irq_event.level);
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;
	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}

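/*
 * Example (illustrative only): wiring guest physical memory into a VM via
 * the KVM_SET_USER_MEMORY_REGION path handled above.  A minimal sketch; the
 * backing store here is anonymous memory, and ram_size is up to the caller:
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = ram_size,
 *		.userspace_addr  = (unsigned long)mmap(NULL, ram_size,
 *					PROT_READ | PROT_WRITE,
 *					MAP_PRIVATE | MAP_ANONYMOUS, -1, 0),
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */
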
static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address,
				  int *type)
{
	struct kvm *kvm = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	page = gfn_to_page(kvm, pgoff);
	if (is_error_page(page)) {
		kvm_release_page(page);
		return NOPAGE_SIGBUS;
	}
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.nopage = kvm_vm_nopage,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
	if (r) {
		kvm_destroy_vm(kvm);
		return r;
	}

	kvm->filp = file;

	return fd;
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION: {
		int ext = (long)argp;

		switch (ext) {
		case KVM_CAP_IRQCHIP:
		case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
		case KVM_CAP_USER_MEMORY:
			r = 1;
			break;
		default:
			r = 0;
			break;
		}
		break;
	}
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = 2 * PAGE_SIZE;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

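/*
 * Example (illustrative only): a complete bring-up of the three fd levels
 * (system, VM, vcpu) exposed by this file.  A minimal sketch with error
 * handling omitted:
 *
 *	#include <fcntl.h>
 *	#include <linux/kvm.h>
 *	#include <sys/ioctl.h>
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *
 *	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
 *		exit(1);
 *	int vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *	int map_sz  = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 */
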
static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

/*
 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
 * cached on it.
 */
static void decache_vcpus_on_cpu(int cpu)
{
	struct kvm *vm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(vm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = vm->vcpus[i];
			if (!vcpu)
				continue;
			/*
			 * If the vcpu is locked, then it is running on some
			 * other cpu and therefore it is not cached on the
			 * cpu in question.
			 *
			 * If it's not locked, check the last cpu it executed
			 * on.
			 */
			if (mutex_trylock(&vcpu->mutex)) {
				if (vcpu->cpu == cpu) {
					kvm_x86_ops->vcpu_decache(vcpu);
					vcpu->cpu = -1;
				}
				mutex_unlock(&vcpu->mutex);
			}
		}
	spin_unlock(&kvm_lock);
}

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_x86_ops->hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
	kvm_x86_ops->hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	switch (val) {
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

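/*
 * Example (illustrative only): the shape of a device hung off this bus.
 * kvm_io_device (declared elsewhere in this tree) supplies read/write/
 * in_range hooks; a hypothetical device claiming one mmio page, where
 * DUMMY_BASE and dummy_dev are placeholders:
 *
 *	static int dummy_in_range(struct kvm_io_device *dev, gpa_t addr)
 *	{
 *		return addr >= DUMMY_BASE && addr < DUMMY_BASE + PAGE_SIZE;
 *	}
 *
 *	dummy_dev.in_range = dummy_in_range;
 *	kvm_io_bus_register_dev(&kvm->mmio_bus, &dummy_dev);
 *
 * The in-kernel ioapic is a real example of such a device.
 */
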
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static u64 stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				total += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, NULL, "%llu\n");

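/*
 * Example (illustrative only): with debugfs mounted, the counters
 * registered by kvm_init_debug() below appear as plain files, e.g.:
 *
 *	mount -t debugfs none /sys/kernel/debug
 *	cat /sys/kernel/debug/kvm/exits
 *
 * Each read sums the per-vcpu u32 at the stat's offset across all VMs via
 * stat_get().
 */
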
static __init void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
						(void *)(long)p->offset,
						&stat_fops);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	set_kset_name("kvm"),
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_x86_ops->vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_x86_ops->vcpu_put(vcpu);
}

int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
		 struct module *module)
{
	int r;
	int cpu;

	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		return -EEXIST;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		return -EOPNOTSUPP;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		return -EOPNOTSUPP;
	}

	kvm_x86_ops = ops;

	r = kvm_x86_ops->hardware_setup();
	if (r < 0)
		goto out;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_x86_ops->check_processor_compatibility,
				&r, 0, 1);
		if (r < 0)
			goto out_free_0;
	}

	on_each_cpu(hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_1;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_2;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_3;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu), 0, 0);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_4;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_4:
	sysdev_unregister(&kvm_sysdev);
out_free_3:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
	on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_0:
	kvm_x86_ops->hardware_unsetup();
out:
	kvm_x86_ops = NULL;
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init_x86);

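/*
 * Example (illustrative only): how a vendor module hands its kvm_x86_ops to
 * the code above.  A hypothetical minimal init, modeled on what the
 * kvm-intel and kvm-amd modules do; all my_* names are placeholders:
 *
 *	static struct kvm_x86_ops my_x86_ops = {
 *		.cpu_has_kvm_support = my_cpu_has_kvm_support,
 *		.disabled_by_bios    = my_disabled_by_bios,
 *		(... hardware_setup, vcpu_create and the rest ...)
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return kvm_init_x86(&my_x86_ops, sizeof(struct my_vcpu),
 *				    THIS_MODULE);
 *	}
 *
 * vcpu_size lets each vendor embed its own vcpu structure in the kvm_vcpu
 * kmem cache created above.
 */
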
void kvm_exit_x86(void)
{
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_x86_ops->hardware_unsetup();
	kvm_x86_ops = NULL;
}
EXPORT_SYMBOL_GPL(kvm_exit_x86);

static __init int kvm_init(void)
{
	int r;

	r = kvm_mmu_module_init();
	if (r)
		goto out4;

	kvm_init_debug();

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	return 0;

out:
	kvm_exit_debug();
	kvm_mmu_module_exit();
out4:
	return r;
}

static __exit void kvm_exit(void)
{
	kvm_exit_debug();
	__free_page(bad_page);
	kvm_mmu_module_exit();
}

module_init(kvm_init)
module_exit(kvm_exit)