/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/preempt.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 8
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40

#define KVM_PIO_PAGE_OFFSET 1
/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH 0
/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long gva_t;
typedef unsigned long gfn_t;

typedef unsigned long hva_t;
typedef unsigned long hfn_t;
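/*
 * Illustrative only, not part of the original header: the frame-number
 * types above relate to the address types by the page size, i.e. a frame
 * number is the corresponding physical address shifted down by the usual
 * kernel PAGE_SHIFT.  Hypothetical helpers:
 */
static inline gfn_t example_gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline gpa_t example_gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}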
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};
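/*
 * Illustrative only, assuming unused slots hold NULL: counting how many
 * parent-spte slots of a single chain node are in use.  A shadow page
 * with more parents strings further nodes together via "link".
 */
static inline int example_pte_chain_used(struct kvm_pte_chain *pc)
{
	int i;

	for (i = 0; i < NR_PTE_CHAIN_ENTRIES; i++)
		if (!pc->parent_ptes[i])
			break;	/* assumption: slots fill from index 0 */
	return i;
}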
/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 *   bits 17:19 - "access" - the user, writable, and nx bits of a huge page pde
 */
union kvm_mmu_page_role {
	struct {
		/* ... */
		unsigned quadrant : 2;
		unsigned pad_for_nice_hex_output : 6;
		unsigned metaphysical : 1;
		unsigned hugepage_access : 3;
	};
};
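/*
 * Illustrative only: encoding the layout documented above by hand, for a
 * 2-level guest shadowed at level 2 in quadrant 1 (values made up for the
 * example; the union normally does this packing via its bitfields).
 */
static inline unsigned example_mmu_role_word(void)
{
	unsigned word = 0;

	word |= 2 << 0;		/* bits 0:3  - total guest paging levels */
	word |= 2 << 4;		/* bits 4:7  - shadow page table level */
	word |= 1 << 8;		/* bits 8:9  - quadrant */
	return word;
}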
struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	/* ... */
	union kvm_mmu_page_role role;

	/* hold the gfn of each spte inside spt */
	/* ... */
	unsigned long slot_bitmap;	/* One bit set per slot which has memory
					 * in this shadow page.
					 */
	int multimapped;	/* More than one parent_pte? */
	int root_count;		/* Currently serving as active root */
	union {
		u64 *parent_pte;		/* !multimapped */
		struct hlist_head parent_ptes;	/* multimapped, kvm_pte_chain */
	};
};
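/*
 * Illustrative only: with a single parent, parent_pte is used directly;
 * once a page is multimapped, parents live in kvm_pte_chain nodes on the
 * parent_ptes hlist.  A hypothetical accessor for the first parent spte:
 */
static inline u64 *example_first_parent_pte(struct kvm_mmu_page *sp)
{
	struct hlist_node *node;

	if (!sp->multimapped)
		return sp->parent_pte;
	node = sp->parent_ptes.first;
	if (!node)
		return NULL;
	return hlist_entry(node, struct kvm_pte_chain, link)->parent_ptes[0];
}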
extern struct kmem_cache *kvm_vcpu_cache;
/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*prefetch_page)(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *page);
	/* ... */
	int shadow_root_level;
	/* ... */
};
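/*
 * Illustrative only: callers never branch on the paging mode; they go
 * through whatever handlers the current mode installed in the structure
 * above.  A hypothetical page-fault dispatch, with the mmu passed in
 * explicitly (where the vcpu keeps its mmu is not shown in this header):
 */
static inline int example_mmu_dispatch_fault(struct kvm_vcpu *vcpu,
					     struct kvm_mmu *mmu,
					     gva_t cr2, u32 error_code)
{
	return mmu->page_fault(vcpu, cr2, error_code);
}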
#define KVM_NR_MEM_OBJS 40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	/* ... */
	void *objects[KVM_NR_MEM_OBJS];
};
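/*
 * Illustrative only: the mmu tops the cache up before it can fault, then
 * pops objects where allocation must not fail.  The fill count is a field
 * elided above (assumed "nobjs"); it is passed in here instead so the
 * sketch stays self-contained.
 */
static inline void *example_mmu_cache_pop(struct kvm_mmu_memory_cache *mc,
					  int *nobjs)
{
	BUG_ON(*nobjs == 0);	/* cache must have been refilled first */
	return mc->objects[--(*nobjs)];
}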
struct kvm_guest_debug {
	/* ... */
};
struct kvm_pio_request {
	/* ... */
	struct page *guest_pages[2];
	unsigned guest_page_offset;
	/* ... */
};
struct kvm_vcpu_stat {
	/* ... */
	u32 irq_window_exits;
	/* ... */
	u32 request_irq_exits;
	/* ... */
	u32 host_state_reload;
	/* ... */
	u32 insn_emulation_fail;
};
struct kvm_io_device {
	void (*read)(struct kvm_io_device *this,
		     gpa_t addr,
		     int len,
		     void *val);
	void (*write)(struct kvm_io_device *this,
		      gpa_t addr,
		      int len,
		      const void *val);
	int (*in_range)(struct kvm_io_device *this, gpa_t addr);
	void (*destructor)(struct kvm_io_device *this);
	/* ... */
};
static inline void kvm_iodevice_read(struct kvm_io_device *dev,
				     gpa_t addr,
				     int len,
				     void *val)
{
	dev->read(dev, addr, len, val);
}
static inline void kvm_iodevice_write(struct kvm_io_device *dev,
				      gpa_t addr,
				      int len,
				      const void *val)
{
	dev->write(dev, addr, len, val);
}
static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
{
	return dev->in_range(dev, addr);
}
static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
{
	if (dev->destructor)
		dev->destructor(dev);
}
/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * it in one place.
 */
struct kvm_io_bus {
	/* ... */
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
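/*
 * Illustrative only: the lookup described above is a plain linear scan,
 * asking each registered device whether it claims the address (this is
 * what kvm_io_bus_find_dev, declared below, is expected to do; the live
 * device count is a field elided above and passed in here instead).
 */
static inline struct kvm_io_device *
example_io_bus_scan(struct kvm_io_device **devs, int count, gpa_t addr)
{
	int i;

	for (i = 0; i < count; i++)
		if (kvm_iodevice_inrange(devs[i], addr))
			return devs[i];
	return NULL;	/* no device claims this address */
}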
void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);
#ifdef CONFIG_HAS_IOMEM
#define KVM_VCPU_MMIO			\
	int mmio_read_completed;	\
	unsigned char mmio_data[8];	\
	gpa_t mmio_phys_addr;
#else
#define KVM_VCPU_MMIO
#endif
#define KVM_VCPU_COMM					\
	struct preempt_notifier preempt_notifier;	\
	struct mutex mutex;				\
	struct kvm_run *run;				\
	unsigned long requests;				\
	struct kvm_guest_debug guest_debug;		\
	int guest_fpu_loaded;				\
	wait_queue_head_t wq;				\
	struct kvm_vcpu_stat stat;			\
	KVM_VCPU_MMIO
struct kvm_mem_alias {
	/* ... */
	unsigned long npages;
	/* ... */
};
struct kvm_memory_slot {
	/* ... */
	unsigned long npages;
	/* ... */
	unsigned long *dirty_bitmap;
	unsigned long userspace_addr;
	/* ... */
};
struct kvm_vm_stat {
	/* ... */
	u32 mmu_shadow_zapped;
	/* ... */
	u32 remote_tlb_flush;
};
struct kvm {
	struct mutex lock;	/* protects everything except vcpus */
	/* ... */
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
	/* ... */
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
					KVM_PRIVATE_MEM_SLOTS];
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct list_head active_mmu_pages;
	unsigned int n_free_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_alloc_mmu_pages;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	struct list_head vm_list;
	/* ... */
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	int round_robin_prev_vcpu;
	unsigned int tss_addr;
	struct page *apic_access_page;
	struct kvm_vm_stat stat;
};
static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)
{
	return kvm->vpic;
}

static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
{
	return kvm->vioapic;
}

static inline int irqchip_in_kernel(struct kvm *kvm)
{
	return pic_irqchip(kvm) != NULL;
}
struct descriptor_table {
	/* ... */
} __attribute__((packed));
/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
 do {									\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \
 } while (0)
#define kvm_printf(kvm, fmt...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
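/*
 * Illustrative only: a typical caller reports an unsupported guest action
 * through the rate-limited helper above ("msr" here is a hypothetical
 * local variable):
 *
 *	pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
 */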
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

void decache_vcpus_on_cpu(int cpu);
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module);
hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
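/*
 * Illustrative only: per the macros above, an error is signalled in-band
 * by setting the most significant bit of an hpa_t, so any address with
 * that bit set tests true under is_error_hpa():
 */
static inline hpa_t example_error_hpa(void)
{
	return HPA_ERR_MASK;	/* is_error_hpa(HPA_ERR_MASK) == 1 */
}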
hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
extern struct page *bad_page;

int is_error_page(struct page *page);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
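/*
 * Illustrative only: the accessors above copy between host buffers and
 * guest physical memory, returning 0 on success.  Hypothetical one-byte
 * peek/poke helpers built on them:
 */
static inline int example_peek_guest_byte(struct kvm *kvm, gpa_t gpa, u8 *b)
{
	return kvm_read_guest(kvm, gpa, b, 1);
}

static inline int example_poke_guest_byte(struct kvm *kvm, gpa_t gpa, u8 b)
{
	return kvm_write_guest(kvm, gpa, &b, 1);
}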
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);

int kvm_dev_ioctl_check_extension(long ext);
int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);
void kvm_arch_destroy_vm(struct kvm *kvm);
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);

void kvm_free_physmem(struct kvm *kvm);

struct kvm *kvm_arch_create_vm(void);
void kvm_arch_destroy_vm(struct kvm *kvm);
static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}
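/*
 * Illustrative only: the vcpu run loop is expected to bracket time spent
 * in guest mode with the pair above, so the interval is accounted as
 * guest time (PF_VCPU) rather than ordinary system time:
 *
 *	kvm_guest_enter();
 *	... enter the guest via the hardware virtualization hooks ...
 *	kvm_guest_exit();
 */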
static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}
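/*
 * Illustrative only: memslot_id() relies on "slot" pointing into the
 * kvm->memslots[] array, so the id is just the element index, e.g.
 * memslot_id(kvm, &kvm->memslots[3]) == 3.
 */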
struct kvm_stats_debugfs_item {
	/* ... */
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];