KVM: Portability: Split kvm_vcpu into arch dependent and independent parts (part 1)
author     Zhang Xiantao <xiantao.zhang@intel.com>
Sat, 20 Oct 2007 07:34:38 +0000 (15:34 +0800)
committer  Avi Kivity <avi@qumranet.com>
Wed, 30 Jan 2008 15:52:54 +0000 (17:52 +0200)
First step toward splitting kvm_vcpu.  For now, we just use a macro to
define the fields common to kvm_vcpu across all archs, and each arch
defines its own kvm_vcpu struct.  (A simplified sketch of this pattern
follows the file list below.)

Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
drivers/kvm/ioapic.c
drivers/kvm/irq.c
drivers/kvm/kvm.h
drivers/kvm/kvm_main.c
drivers/kvm/lapic.c
drivers/kvm/mmu.c
drivers/kvm/svm.c
drivers/kvm/vmx.c
drivers/kvm/x86.h
drivers/kvm/x86_emulate.c
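
For reference, here is a minimal, self-contained sketch of the macro pattern this
patch introduces (field names are trimmed and types simplified for illustration;
this is not the kernel code itself):

/*
 * Sketch of the split: a "generic" header defines the common vcpu fields as
 * a macro, and each arch header expands it at the top of its own
 * struct kvm_vcpu before appending arch-specific state.
 */
#include <stdio.h>

/* generic side: fields shared by every architecture (simplified) */
#define KVM_VCPU_COMM           \
	void *kvm;              \
	int   vcpu_id;          \
	int   cpu;              \
	unsigned long requests

/* arch side: the arch header pastes the common block first */
struct kvm_vcpu {
	KVM_VCPU_COMM;
	unsigned long cr0;      /* arch-specific state follows */
	unsigned long cr3;
	unsigned long rip;
};

int main(void)
{
	struct kvm_vcpu vcpu = { .vcpu_id = 0, .cr0 = 0x80000001UL };

	/* generic code can still reach the common fields by name */
	printf("vcpu %d, cr0=%#lx\n", vcpu.vcpu_id, vcpu.cr0);
	return 0;
}

Generic code keeps accessing the common fields by name, while each arch is free
to append whatever state it needs after the macro.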

drivers/kvm/ioapic.c
index 8503d99b8339a29cdbeaced8a106881df746a5c5..e14b7c724e6727b7d62b34c5d53dd1d5a31747e5 100644 (file)
@@ -27,6 +27,8 @@
  */
 
 #include "kvm.h"
+#include "x86.h"
+
 #include <linux/kvm.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
drivers/kvm/irq.c
index 7628c7ff628ff1db90f3a9a4d20df792bb59f4ad..59b47c55fc76b50f861d82b77df850dd22972b52 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 
 #include "kvm.h"
+#include "x86.h"
 #include "irq.h"
 
 /*
drivers/kvm/kvm.h
index eb006ed696c16015c916f88404129b860394a2fb..db18d278c1c06056a90622fac261676da6976803 100644 (file)
@@ -308,93 +308,37 @@ struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
 void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
                             struct kvm_io_device *dev);
 
-struct kvm_vcpu {
-       struct kvm *kvm;
-       struct preempt_notifier preempt_notifier;
-       int vcpu_id;
-       struct mutex mutex;
-       int   cpu;
-       u64 host_tsc;
-       struct kvm_run *run;
-       int interrupt_window_open;
-       int guest_mode;
-       unsigned long requests;
-       unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
-       DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
-       unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
-       unsigned long rip;      /* needs vcpu_load_rsp_rip() */
-
-       unsigned long cr0;
-       unsigned long cr2;
-       unsigned long cr3;
-       unsigned long cr4;
-       unsigned long cr8;
-       u64 pdptrs[4]; /* pae */
-       u64 shadow_efer;
-       u64 apic_base;
-       struct kvm_lapic *apic;    /* kernel irqchip context */
-#define VCPU_MP_STATE_RUNNABLE          0
-#define VCPU_MP_STATE_UNINITIALIZED     1
-#define VCPU_MP_STATE_INIT_RECEIVED     2
-#define VCPU_MP_STATE_SIPI_RECEIVED     3
-#define VCPU_MP_STATE_HALTED            4
-       int mp_state;
-       int sipi_vector;
-       u64 ia32_misc_enable_msr;
-
-       struct kvm_mmu mmu;
-
-       struct kvm_mmu_memory_cache mmu_pte_chain_cache;
-       struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
-       struct kvm_mmu_memory_cache mmu_page_cache;
-       struct kvm_mmu_memory_cache mmu_page_header_cache;
-
-       gfn_t last_pt_write_gfn;
-       int   last_pt_write_count;
-       u64  *last_pte_updated;
-
-       struct kvm_guest_debug guest_debug;
-
-       struct i387_fxsave_struct host_fx_image;
-       struct i387_fxsave_struct guest_fx_image;
-       int fpu_active;
-       int guest_fpu_loaded;
-
-       int mmio_needed;
-       int mmio_read_completed;
-       int mmio_is_write;
-       int mmio_size;
-       unsigned char mmio_data[8];
+#ifdef CONFIG_HAS_IOMEM
+#define KVM_VCPU_MMIO                  \
+       int mmio_needed;                \
+       int mmio_read_completed;        \
+       int mmio_is_write;              \
+       int mmio_size;                  \
+       unsigned char mmio_data[8];     \
        gpa_t mmio_phys_addr;
-       gva_t mmio_fault_cr2;
-       struct kvm_pio_request pio;
-       void *pio_data;
-       wait_queue_head_t wq;
 
-       int sigset_active;
-       sigset_t sigset;
+#else
+#define KVM_VCPU_MMIO
 
-       struct kvm_stat stat;
+#endif
 
-       struct {
-               int active;
-               u8 save_iopl;
-               struct kvm_save_segment {
-                       u16 selector;
-                       unsigned long base;
-                       u32 limit;
-                       u32 ar;
-               } tr, es, ds, fs, gs;
-       } rmode;
-       int halt_request; /* real mode on Intel only */
-
-       int cpuid_nent;
-       struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
-
-       /* emulate context */
-
-       struct x86_emulate_ctxt emulate_ctxt;
-};
+#define KVM_VCPU_COMM                                  \
+       struct kvm *kvm;                                \
+       struct preempt_notifier preempt_notifier;       \
+       int vcpu_id;                                    \
+       struct mutex mutex;                             \
+       int   cpu;                                      \
+       struct kvm_run *run;                            \
+       int guest_mode;                                 \
+       unsigned long requests;                         \
+       struct kvm_guest_debug guest_debug;             \
+       int fpu_active;                                 \
+       int guest_fpu_loaded;                           \
+       wait_queue_head_t wq;                           \
+       int sigset_active;                              \
+       sigset_t sigset;                                \
+       struct kvm_stat stat;                           \
+       KVM_VCPU_MMIO
 
 struct kvm_mem_alias {
        gfn_t base_gfn;
@@ -680,50 +624,6 @@ static inline void kvm_guest_exit(void)
        current->flags &= ~PF_VCPU;
 }
 
-static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
-                                    u32 error_code)
-{
-       return vcpu->mmu.page_fault(vcpu, gva, error_code);
-}
-
-static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
-{
-       if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
-               __kvm_mmu_free_some_pages(vcpu);
-}
-
-static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
-{
-       if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
-               return 0;
-
-       return kvm_mmu_load(vcpu);
-}
-
-static inline int is_long_mode(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_X86_64
-       return vcpu->shadow_efer & EFER_LME;
-#else
-       return 0;
-#endif
-}
-
-static inline int is_pae(struct kvm_vcpu *vcpu)
-{
-       return vcpu->cr4 & X86_CR4_PAE;
-}
-
-static inline int is_pse(struct kvm_vcpu *vcpu)
-{
-       return vcpu->cr4 & X86_CR4_PSE;
-}
-
-static inline int is_paging(struct kvm_vcpu *vcpu)
-{
-       return vcpu->cr0 & X86_CR0_PG;
-}
-
 static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
        return slot - kvm->memslots;
drivers/kvm/kvm_main.c
index 8f6c21d026567e3b96756c5b284d56ac33f98d3f..0b23657f434cd9da6b68efef864eb8118363efbb 100644 (file)
@@ -2244,7 +2244,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                if (r)
                        goto out;
        }
-
+#if CONFIG_HAS_IOMEM
        if (vcpu->mmio_needed) {
                memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
                vcpu->mmio_read_completed = 1;
@@ -2259,7 +2259,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                        goto out;
                }
        }
-
+#endif
        if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
                kvm_x86_ops->cache_regs(vcpu);
                vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
drivers/kvm/lapic.c
index 8840f9dc0bca13b6ff9d713075d977598c093376..64f74bd7093af9cc3592d39847eea1deeb051f94 100644 (file)
@@ -18,6 +18,8 @@
  */
 
 #include "kvm.h"
+#include "x86.h"
+
 #include <linux/kvm.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
drivers/kvm/mmu.c
index 5d7af4bde595b2d2cebf87ae961b2d3319274634..d9c5950cfae1ed812a4b98a29aaae698d661d376 100644 (file)
@@ -19,6 +19,7 @@
 
 #include "vmx.h"
 #include "kvm.h"
+#include "x86.h"
 
 #include <linux/types.h>
 #include <linux/string.h>
drivers/kvm/svm.c
index ef068d2dddd75f68da42f2f15f07c1f5c3388777..035c8e6898f14e12392ea74f57ccd60b7ce6d18d 100644 (file)
@@ -13,7 +13,7 @@
  * the COPYING file in the top-level directory.
  *
  */
-
+#include "x86.h"
 #include "kvm_svm.h"
 #include "x86_emulate.h"
 #include "irq.h"
drivers/kvm/vmx.c
index 9f77ddbeb025a8482c6b192fec81854242db4550..87ff351288254b2860486faa58c6e5b57cd05497 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include "kvm.h"
+#include "x86.h"
 #include "x86_emulate.h"
 #include "irq.h"
 #include "vmx.h"
drivers/kvm/x86.h
index 1e2f71bd805d60e609e52f5459ea6a18e48616fc..01452b552db317b8b1e84b0c322a36a13a085450 100644 (file)
 
 #include "kvm.h"
 
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+
+struct kvm_vcpu {
+       KVM_VCPU_COMM;
+       u64 host_tsc;
+       int interrupt_window_open;
+       unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
+       DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
+       unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
+       unsigned long rip;      /* needs vcpu_load_rsp_rip() */
+
+       unsigned long cr0;
+       unsigned long cr2;
+       unsigned long cr3;
+       unsigned long cr4;
+       unsigned long cr8;
+       u64 pdptrs[4]; /* pae */
+       u64 shadow_efer;
+       u64 apic_base;
+       struct kvm_lapic *apic;    /* kernel irqchip context */
+#define VCPU_MP_STATE_RUNNABLE          0
+#define VCPU_MP_STATE_UNINITIALIZED     1
+#define VCPU_MP_STATE_INIT_RECEIVED     2
+#define VCPU_MP_STATE_SIPI_RECEIVED     3
+#define VCPU_MP_STATE_HALTED            4
+       int mp_state;
+       int sipi_vector;
+       u64 ia32_misc_enable_msr;
+
+       struct kvm_mmu mmu;
+
+       struct kvm_mmu_memory_cache mmu_pte_chain_cache;
+       struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
+       struct kvm_mmu_memory_cache mmu_page_cache;
+       struct kvm_mmu_memory_cache mmu_page_header_cache;
+
+       gfn_t last_pt_write_gfn;
+       int   last_pt_write_count;
+       u64  *last_pte_updated;
+
+
+       struct i387_fxsave_struct host_fx_image;
+       struct i387_fxsave_struct guest_fx_image;
+
+       gva_t mmio_fault_cr2;
+       struct kvm_pio_request pio;
+       void *pio_data;
+
+       struct {
+               int active;
+               u8 save_iopl;
+               struct kvm_save_segment {
+                       u16 selector;
+                       unsigned long base;
+                       u32 limit;
+                       u32 ar;
+               } tr, es, ds, fs, gs;
+       } rmode;
+       int halt_request; /* real mode on Intel only */
+
+       int cpuid_nent;
+       struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
+
+       /* emulate context */
+
+       struct x86_emulate_ctxt emulate_ctxt;
+};
+
+static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
+                                    u32 error_code)
+{
+       return vcpu->mmu.page_fault(vcpu, gva, error_code);
+}
+
+static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
+{
+       if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
+               __kvm_mmu_free_some_pages(vcpu);
+}
+
+static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
+{
+       if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
+               return 0;
+
+       return kvm_mmu_load(vcpu);
+}
+
+static inline int is_long_mode(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_X86_64
+       return vcpu->shadow_efer & EFER_LME;
+#else
+       return 0;
+#endif
+}
+
+static inline int is_pae(struct kvm_vcpu *vcpu)
+{
+       return vcpu->cr4 & X86_CR4_PAE;
+}
+
+static inline int is_pse(struct kvm_vcpu *vcpu)
+{
+       return vcpu->cr4 & X86_CR4_PSE;
+}
+
+static inline int is_paging(struct kvm_vcpu *vcpu)
+{
+       return vcpu->cr0 & X86_CR0_PG;
+}
+
+
 #endif
drivers/kvm/x86_emulate.c
index e962de3316061d3cf9686966eb72d4032b00cce0..73e3580c88e4e2ed4b00be93241cabda6e548fad 100644 (file)
@@ -26,6 +26,7 @@
 #define DPRINTF(_f, _a ...) printf(_f , ## _a)
 #else
 #include "kvm.h"
+#include "x86.h"
 #define DPRINTF(x...) do {} while (0)
 #endif
 #include "x86_emulate.h"