MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
-static unsigned int halt_poll_ns;
+/* halt polling only reduces halt latency by 5-7 us, so 500 us of polling is enough */
+static unsigned int halt_poll_ns = 500000;
module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
+/* Default (2) doubles per-vcpu halt_poll_ns on every grow. */
+static unsigned int halt_poll_ns_grow = 2;
+module_param(halt_poll_ns_grow, uint, S_IRUGO);
+
+/* Default (0) resets per-vcpu halt_poll_ns on every shrink. */
+static unsigned int halt_poll_ns_shrink;
+module_param(halt_poll_ns_shrink, uint, S_IRUGO);
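
With the defaults above, the per-vcpu window starts at 0, jumps to a 10 us base on the first grow, doubles on each further short halt, and collapses back to 0 on a long one (a shrink divisor of 0 means "reset"). halt_poll_ns itself is also writable at runtime via /sys/module/kvm/parameters/halt_poll_ns, while the grow/shrink factors are read-only (S_IRUGO). A minimal userspace sketch of the arithmetic, assuming the defaults; grow_ns()/shrink_ns() are illustrative stand-ins, not kernel functions:

#include <stdio.h>

/* Hypothetical copies of the module parameters, for illustration only. */
static unsigned int grow = 2;    /* halt_poll_ns_grow */
static unsigned int shrink = 0;  /* halt_poll_ns_shrink */

static unsigned int grow_ns(unsigned int val)
{
	/* 10 us base, then multiply; mirrors grow_halt_poll_ns() below. */
	return (val == 0 && grow) ? 10000 : val * grow;
}

static unsigned int shrink_ns(unsigned int val)
{
	/* Divisor 0 means "reset to 0"; mirrors shrink_halt_poll_ns(). */
	return shrink ? val / shrink : 0;
}

int main(void)
{
	unsigned int val = 0;
	int i;

	for (i = 0; i < 7; i++) {	/* 0 -> 10000 -> 20000 -> ... -> 640000 */
		val = grow_ns(val);
		printf("%u\n", val);
	}
	printf("after shrink: %u\n", shrink_ns(val));
	return 0;
}

Running it prints the doubling sequence 10000 through 640000 and then 0 after the shrink, the sawtooth that the tracepoints added below make visible.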
+
/*
* Ordering of locks:
*
vcpu->kvm = kvm;
vcpu->vcpu_id = id;
vcpu->pid = NULL;
+ vcpu->halt_poll_ns = 0;
init_waitqueue_head(&vcpu->wq);
kvm_async_pf_vcpu_init(vcpu);
list_add(&kvm->vm_list, &vm_list);
spin_unlock(&kvm_lock);
+ preempt_notifier_inc();
+
return kvm;
out_err:
cleanup_srcu_struct(&kvm->irq_srcu);
cleanup_srcu_struct(&kvm->srcu);
kvm_arch_free_vm(kvm);
+ preempt_notifier_dec();
hardware_disable_all();
mmdrop(mm);
}
}
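
The preempt_notifier_inc()/preempt_notifier_dec() pair above keeps a refcount on a scheduler static key, so context switches pay (almost) nothing for the preempt notifier hooks while no VM exists. A hedged sketch of that gating pattern, with hypothetical names; the real key and fire functions live in kernel/sched/core.c:

#include <linux/jump_label.h>
#include <linux/printk.h>
#include <linux/sched.h>

/* Hypothetical mirror of the scheduler-side key; illustrative only. */
static struct static_key my_notifier_key = STATIC_KEY_INIT_FALSE;

void my_notifier_inc(void)		/* cf. preempt_notifier_inc() */
{
	static_key_slow_inc(&my_notifier_key);
}

void my_notifier_dec(void)		/* cf. preempt_notifier_dec() */
{
	static_key_slow_dec(&my_notifier_key);
}

/* Call sites compile to a patched-out branch while the key is false. */
static void fire_my_notifiers(struct task_struct *curr)
{
	if (static_key_false(&my_notifier_key))
		pr_info("notifiers enabled for %d\n", curr->pid);	/* slow path */
}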
EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
+static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
+{
+	unsigned int old, val;
+
+	old = val = vcpu->halt_poll_ns;
+	/* start at a 10 us base the first time we grow */
+	if (val == 0 && halt_poll_ns_grow)
+		val = 10000;
+	else
+		val *= halt_poll_ns_grow;
+
+ vcpu->halt_poll_ns = val;
+ trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
+}
+
+static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
+{
+ int old, val;
+	unsigned int old, val;
+ old = val = vcpu->halt_poll_ns;
+ if (halt_poll_ns_shrink == 0)
+ val = 0;
+ else
+ val /= halt_poll_ns_shrink;
+
+ vcpu->halt_poll_ns = val;
+ trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
+}
+
static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
{
if (kvm_arch_vcpu_runnable(vcpu)) {
ktime_t start, cur;
DEFINE_WAIT(wait);
bool waited = false;
+ u64 block_ns;
start = cur = ktime_get();
- if (halt_poll_ns) {
- ktime_t stop = ktime_add_ns(ktime_get(), halt_poll_ns);
+ if (vcpu->halt_poll_ns) {
+ ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
do {
/*
cur = ktime_get();
out:
- trace_kvm_vcpu_wakeup(ktime_to_ns(cur) - ktime_to_ns(start), waited);
+ block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
+
+	if (halt_poll_ns) {
+		/* the poll window already covered this wakeup; keep it */
+		if (block_ns <= vcpu->halt_poll_ns)
+			;
+ /* we had a long block, shrink polling */
+ else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
+ shrink_halt_poll_ns(vcpu);
+ /* we had a short halt and our poll time is too small */
+ else if (vcpu->halt_poll_ns < halt_poll_ns &&
+ block_ns < halt_poll_ns)
+ grow_halt_poll_ns(vcpu);
+ }
+
+ trace_kvm_vcpu_wakeup(block_ns, waited);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_block);
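
The end-of-block policy above compares the measured block time against both the per-vcpu window and the global halt_poll_ns ceiling. The same branch ladder restated as a self-contained decision function, with illustrative names (poll_ns stands for vcpu->halt_poll_ns, ceiling for the module parameter); a sketch, not kernel code:

enum action { HOLD, SHRINK, GROW };

static enum action poll_policy(unsigned long long block_ns,
			       unsigned int poll_ns, unsigned int ceiling)
{
	if (!ceiling)			/* adaptive polling disabled */
		return HOLD;
	if (block_ns <= poll_ns)	/* window already covered this halt */
		return HOLD;
	if (poll_ns && block_ns > ceiling)
		return SHRINK;		/* long block: polling was wasted */
	if (poll_ns < ceiling && block_ns < ceiling)
		return GROW;		/* short halt, window too small */
	return HOLD;
}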
}
kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
+
+ /*
+	 * Pairs with smp_rmb() in kvm_get_vcpu.  Write the kvm->vcpus
+	 * entry before the incremented value of kvm->online_vcpus.
+ */
smp_wmb();
atomic_inc(&kvm->online_vcpus);
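
Readers such as kvm_for_each_vcpu() load kvm->online_vcpus first and then index kvm->vcpus, so they need the matching read barrier. The reader side, roughly as it appears in include/linux/kvm_host.h in this era:

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	/* Pairs with the smp_wmb() in kvm_vm_ioctl_create_vcpu(), in case
	 * the caller has already read kvm->online_vcpus (as
	 * kvm_for_each_vcpu does).
	 */
	smp_rmb();
	return kvm->vcpus[i];
}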
case KVM_CAP_USER_MEMORY:
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
- case KVM_CAP_SET_BOOT_CPU_ID:
-#endif
case KVM_CAP_INTERNAL_ERROR_DATA:
#ifdef CONFIG_HAVE_KVM_MSI
case KVM_CAP_SIGNAL_MSI:
r = kvm_ioeventfd(kvm, &data);
break;
}
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
- case KVM_SET_BOOT_CPU_ID:
- r = 0;
- mutex_lock(&kvm->lock);
- if (atomic_read(&kvm->online_vcpus) != 0)
- r = -EBUSY;
- else
- kvm->bsp_vcpu_id = arg;
- mutex_unlock(&kvm->lock);
- break;
-#endif
#ifdef CONFIG_HAVE_KVM_MSI
case KVM_SIGNAL_MSI: {
struct kvm_msi msi;