KVM: VMX: Add instruction rdtscp support for guest
arch/x86/kvm/x86.c
index 84dd33e717fd01976927142f7653e4df501e6f36..8798504ace110c8bc5c766bbfb65e8031b9f7849 100644
@@ -93,16 +93,16 @@ module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
 
 struct kvm_shared_msrs_global {
        int nr;
-       struct kvm_shared_msr {
-               u32 msr;
-               u64 value;
-       } msrs[KVM_NR_SHARED_MSRS];
+       u32 msrs[KVM_NR_SHARED_MSRS];
 };
 
 struct kvm_shared_msrs {
        struct user_return_notifier urn;
        bool registered;
-       u64 current_value[KVM_NR_SHARED_MSRS];
+       struct kvm_shared_msr_values {
+               u64 host;
+               u64 curr;
+       } values[KVM_NR_SHARED_MSRS];
 };
 
 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
@@ -147,53 +147,64 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 static void kvm_on_user_return(struct user_return_notifier *urn)
 {
        unsigned slot;
-       struct kvm_shared_msr *global;
        struct kvm_shared_msrs *locals
                = container_of(urn, struct kvm_shared_msrs, urn);
+       struct kvm_shared_msr_values *values;
 
        for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
-               global = &shared_msrs_global.msrs[slot];
-               if (global->value != locals->current_value[slot]) {
-                       wrmsrl(global->msr, global->value);
-                       locals->current_value[slot] = global->value;
+               values = &locals->values[slot];
+               if (values->host != values->curr) {
+                       wrmsrl(shared_msrs_global.msrs[slot], values->host);
+                       values->curr = values->host;
                }
        }
        locals->registered = false;
        user_return_notifier_unregister(urn);
 }
 
-void kvm_define_shared_msr(unsigned slot, u32 msr)
+static void shared_msr_update(unsigned slot, u32 msr)
 {
-       int cpu;
+       struct kvm_shared_msrs *smsr;
        u64 value;
 
+       smsr = &__get_cpu_var(shared_msrs);
+       /* only read; nobody should modify it at this time,
+        * so no locking is needed */
+       if (slot >= shared_msrs_global.nr) {
+               printk(KERN_ERR "kvm: invalid MSR slot!\n");
+               return;
+       }
+       rdmsrl_safe(msr, &value);
+       smsr->values[slot].host = value;
+       smsr->values[slot].curr = value;
+}
+
+void kvm_define_shared_msr(unsigned slot, u32 msr)
+{
        if (slot >= shared_msrs_global.nr)
                shared_msrs_global.nr = slot + 1;
-       shared_msrs_global.msrs[slot].msr = msr;
-       rdmsrl_safe(msr, &value);
-       shared_msrs_global.msrs[slot].value = value;
-       for_each_online_cpu(cpu)
-               per_cpu(shared_msrs, cpu).current_value[slot] = value;
+       shared_msrs_global.msrs[slot] = msr;
+       /* make sure shared_msrs_global has been updated before it is read */
+       smp_wmb();
 }
 EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
 
 static void kvm_shared_msr_cpu_online(void)
 {
        unsigned i;
-       struct kvm_shared_msrs *locals = &__get_cpu_var(shared_msrs);
 
        for (i = 0; i < shared_msrs_global.nr; ++i)
-               locals->current_value[i] = shared_msrs_global.msrs[i].value;
+               shared_msr_update(i, shared_msrs_global.msrs[i]);
 }
 
 void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
 {
        struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
 
-       if (((value ^ smsr->current_value[slot]) & mask) == 0)
+       if (((value ^ smsr->values[slot].curr) & mask) == 0)
                return;
-       smsr->current_value[slot] = value;
-       wrmsrl(shared_msrs_global.msrs[slot].msr, value);
+       smsr->values[slot].curr = value;
+       wrmsrl(shared_msrs_global.msrs[slot], value);
        if (!smsr->registered) {
                smsr->urn.on_user_return = kvm_on_user_return;
                user_return_notifier_register(&smsr->urn);
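
The reworked structures above split each per-CPU slot into a "host" value
(captured by shared_msr_update() when the CPU comes online) and a "curr"
value (whatever was written last), so the user-return notifier can lazily
restore the host value after a guest has run.  A minimal sketch of the
intended calling pattern, roughly what the VMX side of this series needs
for MSR_TSC_AUX (the MSR that RDTSCP returns in ECX); the slot number and
function names below are hypothetical:

#define EXAMPLE_TSC_AUX_SLOT 0			/* hypothetical slot index */

static void example_define_msrs(void)
{
	/* module init: reserve a slot; per-CPU host values are
	 * snapshotted later by kvm_shared_msr_cpu_online() */
	kvm_define_shared_msr(EXAMPLE_TSC_AUX_SLOT, MSR_TSC_AUX);
}

static void example_prepare_guest_entry(u64 guest_tsc_aux)
{
	/* before entering the guest: install the guest value; the
	 * user-return notifier restores values->host the next time
	 * this CPU returns to userspace */
	kvm_set_shared_msr(EXAMPLE_TSC_AUX_SLOT, guest_tsc_aux, -1ull);
}
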
@@ -1563,6 +1574,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
        cpuid_fix_nx_cap(vcpu);
        r = 0;
        kvm_apic_set_version(vcpu);
+       kvm_x86_ops->cpuid_update(vcpu);
 
 out_free:
        vfree(cpuid_entries);
@@ -1585,6 +1597,7 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                goto out;
        vcpu->arch.cpuid_nent = cpuid->nent;
        kvm_apic_set_version(vcpu);
+       kvm_x86_ops->cpuid_update(vcpu);
        return 0;
 
 out:
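
Both CPUID ioctl paths above now finish by calling the new
kvm_x86_ops->cpuid_update() hook, so the vendor module (kvm-intel or
kvm-amd) can react whenever userspace installs a new guest CPUID; for this
patch the reaction is deciding whether the guest may execute RDTSCP.  That
is also why kvm_find_cpuid_entry() gains an EXPORT_SYMBOL_GPL further down.
A hedged sketch of such a hook; the function name is illustrative and the
real VMX callback additionally adjusts its secondary execution controls:

static void example_cpuid_update(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	/* RDTSCP is advertised in CPUID.80000001H:EDX bit 27 */
	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	if (best && (best->edx & (1u << 27))) {
		/* guest CPUID has RDTSCP: let the guest run the insn */
	} else {
		/* otherwise leave it disabled so it raises #UD */
	}
}
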
@@ -1633,6 +1646,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 #else
        unsigned f_lm = 0;
 #endif
+       unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
 
        /* cpuid 1.edx */
        const u32 kvm_supported_word0_x86_features =
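
The new f_rdtscp variable follows the file's existing pattern for
conditional features (f_nx, f_gbpages, f_lm): plain F() entries are always
reported to the guest, while a lowercase f_xxx holds the feature bit only
when a runtime check passes, here the new kvm_x86_ops->rdtscp_supported()
hook.  A minimal sketch of what that hook answers, assuming a host-side
capability check is enough (on VMX the real answer also depends on the
"enable RDTSCP" secondary execution control being available):

static bool example_rdtscp_supported(void)
{
	/* can this vendor module let a guest execute RDTSCP at all? */
	return boot_cpu_has(X86_FEATURE_RDTSCP);
}
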
@@ -1652,7 +1666,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* Reserved */ |
                f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
-               F(FXSR) | F(FXSR_OPT) | f_gbpages | 0 /* RDTSCP */ |
+               F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
                0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
        /* cpuid 1.ecx */
        const u32 kvm_supported_word4_x86_features =
@@ -3722,6 +3736,7 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
        }
        return best;
 }
+EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
 
 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
 {
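
Exporting kvm_find_cpuid_entry() lets the vendor modules query the guest
CPUID outside this file; together with the shared-MSR API from the first
hunk, a module can gate guest access to MSR_TSC_AUX on RDTSCP actually
being exposed.  A sketch building on the earlier examples (the function
name and slot constant are hypothetical):

static int example_set_tsc_aux(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	/* refuse the write unless the guest CPUID advertises RDTSCP */
	if (!best || !(best->edx & (1u << 27)))
		return 1;	/* caller turns this into a #GP injection */

	kvm_set_shared_msr(EXAMPLE_TSC_AUX_SLOT, data, -1ull);
	return 0;
}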