KVM: trace kvm_halt_poll_ns grow/shrink
[deliverable/linux.git] / virt / kvm / kvm_main.c
index 848af90b8091a9a3bd8ed7e18d6ace2692210c36..4662a8877f6c7a06b81803d417e52120a009a15b 100644 (file)
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
-static unsigned int halt_poll_ns;
+/* halt polling only reduces halt latency by 5-7 us, 500us is enough */
+static unsigned int halt_poll_ns = 500000;
 module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
 
+/* Default doubles per-vcpu halt_poll_ns. */
+static unsigned int halt_poll_ns_grow = 2;
+module_param(halt_poll_ns_grow, int, S_IRUGO);
+
+/* Default (0) resets per-vcpu halt_poll_ns to zero when shrinking. */
+static unsigned int halt_poll_ns_shrink;
+module_param(halt_poll_ns_shrink, int, S_IRUGO);
+
 /*
  * Ordering of locks:
  *
@@ -217,6 +226,7 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        vcpu->pid = NULL;
+       vcpu->halt_poll_ns = 0;
        init_waitqueue_head(&vcpu->wq);
        kvm_async_pf_vcpu_init(vcpu);
 
@@ -553,6 +563,8 @@ static struct kvm *kvm_create_vm(unsigned long type)
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
 
+       preempt_notifier_inc();
+
        return kvm;
 
 out_err:
@@ -620,6 +632,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
        cleanup_srcu_struct(&kvm->irq_srcu);
        cleanup_srcu_struct(&kvm->srcu);
        kvm_arch_free_vm(kvm);
+       preempt_notifier_dec();
        hardware_disable_all();
        mmdrop(mm);
 }
@@ -1903,6 +1916,35 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
 
+/*
+ * Grow the per-vcpu halt-polling window after a short halt.
+ *
+ * Starts from a 10us base the first time polling kicks in, then
+ * multiplies the window by the halt_poll_ns_grow module parameter.
+ * Uses unsigned arithmetic: halt_poll_ns is unsigned, and a signed
+ * multiply could overflow (undefined behavior) for large windows.
+ */
+static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
+{
+       unsigned int old, val;
+
+       old = val = vcpu->halt_poll_ns;
+       if (val == 0 && halt_poll_ns_grow)
+               val = 10000; /* 10us base */
+       else
+               val *= halt_poll_ns_grow;
+
+       vcpu->halt_poll_ns = val;
+       trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
+}
+
+/*
+ * Shrink the per-vcpu halt-polling window after a long block.
+ *
+ * Divides the window by the halt_poll_ns_shrink module parameter; a
+ * shrink factor of 0 (the default) resets the window to zero, i.e.
+ * disables polling for this vcpu until it grows again.  Unsigned
+ * arithmetic matches the type of vcpu->halt_poll_ns.
+ */
+static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
+{
+       unsigned int old, val;
+
+       old = val = vcpu->halt_poll_ns;
+       if (halt_poll_ns_shrink == 0)
+               val = 0;
+       else
+               val /= halt_poll_ns_shrink;
+
+       vcpu->halt_poll_ns = val;
+       trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
+}
+
 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
 {
        if (kvm_arch_vcpu_runnable(vcpu)) {
@@ -1925,10 +1967,11 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
        ktime_t start, cur;
        DEFINE_WAIT(wait);
        bool waited = false;
+       u64 block_ns;
 
        start = cur = ktime_get();
-       if (halt_poll_ns) {
-               ktime_t stop = ktime_add_ns(ktime_get(), halt_poll_ns);
+       if (vcpu->halt_poll_ns) {
+               ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
 
                do {
                        /*
@@ -1957,7 +2000,21 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
        cur = ktime_get();
 
 out:
-       trace_kvm_vcpu_wakeup(ktime_to_ns(cur) - ktime_to_ns(start), waited);
+       block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
+
+       if (halt_poll_ns) {
+               if (block_ns <= vcpu->halt_poll_ns)
+                       ;
+               /* we had a long block, shrink polling */
+               else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
+                       shrink_halt_poll_ns(vcpu);
+               /* we had a short halt and our poll time is too small */
+               else if (vcpu->halt_poll_ns < halt_poll_ns &&
+                       block_ns < halt_poll_ns)
+                       grow_halt_poll_ns(vcpu);
+       }
+
+       trace_kvm_vcpu_wakeup(block_ns, waited);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_block);
 
@@ -2203,6 +2260,11 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
        }
 
        kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
+
+       /*
+        * Pairs with smp_rmb() in kvm_get_vcpu.  Write kvm->vcpus
+        * before kvm->online_vcpu's incremented value.
+        */
        smp_wmb();
        atomic_inc(&kvm->online_vcpus);
 
@@ -2615,9 +2677,6 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
        case KVM_CAP_USER_MEMORY:
        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
        case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
-       case KVM_CAP_SET_BOOT_CPU_ID:
-#endif
        case KVM_CAP_INTERNAL_ERROR_DATA:
 #ifdef CONFIG_HAVE_KVM_MSI
        case KVM_CAP_SIGNAL_MSI:
@@ -2713,17 +2772,6 @@ static long kvm_vm_ioctl(struct file *filp,
                r = kvm_ioeventfd(kvm, &data);
                break;
        }
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
-       case KVM_SET_BOOT_CPU_ID:
-               r = 0;
-               mutex_lock(&kvm->lock);
-               if (atomic_read(&kvm->online_vcpus) != 0)
-                       r = -EBUSY;
-               else
-                       kvm->bsp_vcpu_id = arg;
-               mutex_unlock(&kvm->lock);
-               break;
-#endif
 #ifdef CONFIG_HAVE_KVM_MSI
        case KVM_SIGNAL_MSI: {
                struct kvm_msi msi;
This page took 0.059504 seconds and 5 git commands to generate.